InternalCallVerifier EqualityVerifier
/**
 * When multiple handlers registered under one identifier return different
 * codes, DFSAdmin invokes every handler and reports an overall failure (-1),
 * since the differing codes cannot be merged into one meaningful exit code.
 */
@Test public void testMultipleReturnCodeMerging() throws Exception {
  // Mockito.stub(..).toReturn(..) is deprecated; use when(..).thenReturn(..).
  RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
  Mockito.when(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .thenReturn(new RefreshResponse(23, "Twenty Three"));
  RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
  Mockito.when(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .thenReturn(new RefreshResponse(10, "Ten"));
  // Both handlers share one identifier.
  RefreshRegistry.defaultRegistry().register("shared", handlerOne);
  RefreshRegistry.defaultRegistry().register("shared", handlerTwo);
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "shared"};
  int exitCode = admin.run(args);
  assertEquals(-1, exitCode);
  // Each handler saw the identifier with no extra arguments.
  Mockito.verify(handlerOne).handleRefresh("shared", new String[]{});
  Mockito.verify(handlerTwo).handleRefresh("shared", new String[]{});
  RefreshRegistry.defaultRegistry().unregisterAll("shared");
}
InternalCallVerifier EqualityVerifier
/**
 * Refreshing an identifier after all its handlers were unregistered
 * must fail with exit code -1.
 */
@Test public void testUnregistration() throws Exception {
  RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
  DFSAdmin admin = new DFSAdmin(config);
  int exitCode = admin.run(
      new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "firstHandler"});
  assertEquals("DFSAdmin should return -1", -1, exitCode);
}
InternalCallVerifier EqualityVerifier
/**
 * Two handlers registered under a single identifier must both receive the
 * refresh call with the forwarded argument; the command exits with -1 here.
 */
@Test public void testMultipleRegistration() throws Exception {
  RefreshRegistry registry = RefreshRegistry.defaultRegistry();
  registry.register("sharedId", firstHandler);
  registry.register("sharedId", secondHandler);
  DFSAdmin admin = new DFSAdmin(config);
  int exitCode = admin.run(
      new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "sharedId", "one"});
  assertEquals(-1, exitCode);
  // Both handlers were dispatched with the identifier and remaining args.
  Mockito.verify(firstHandler).handleRefresh("sharedId", new String[]{"one"});
  Mockito.verify(secondHandler).handleRefresh("sharedId", new String[]{"one"});
  registry.unregisterAll("sharedId");
}
InternalCallVerifier EqualityVerifier
/**
 * Handlers that throw at refresh time must not crash DFSAdmin: every
 * registered handler is still invoked and the command exits with -1.
 */
@Test public void testExceptionResultsInNormalError() throws Exception {
  // Mockito.stub(..).toThrow(..) is deprecated; use when(..).thenThrow(..).
  RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.when(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .thenThrow(new RuntimeException("Exceptional Handler Throws Exception"));
  RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.when(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .thenThrow(new RuntimeException("More Exceptions"));
  RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
  RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "exceptional"};
  int exitCode = admin.run(args);
  assertEquals(-1, exitCode);
  // Even though both handlers threw, both must have been invoked.
  Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[]{});
  Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[]{});
  RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}
InternalCallVerifier EqualityVerifier
/**
 * A refresh against an identifier nobody registered must fail.
 */
@Test public void testInvalidIdentifier() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  int exitCode = admin.run(
      new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "unregisteredIdentity"});
  assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
}
InternalCallVerifier EqualityVerifier
/**
 * A -refresh invocation missing the identifier argument must fail.
 */
@Test public void testInvalidCommand() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  int exitCode = admin.run(new String[]{"-refresh", "nn"});
  assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
}
InternalCallVerifier EqualityVerifier
/**
 * Variable argument lists after the identifier are forwarded verbatim to the
 * handler. The expected exit codes (2, then 3) presumably come from how
 * secondHandler is stubbed in the shared fixture — confirm against setup.
 */
@Test public void testVariableArgs() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  int exitCode = admin.run(
      new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "secondHandler", "one"});
  assertEquals("DFSAdmin should return 2", 2, exitCode);
  exitCode = admin.run(
      new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "secondHandler", "one", "two"});
  assertEquals("DFSAdmin should now return 3", 3, exitCode);
  // Each invocation delivered exactly its own trailing arguments.
  Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
  Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
}
InternalCallVerifier EqualityVerifier
/**
 * Refreshing a registered identifier succeeds and dispatches only to the
 * handler registered under that identifier.
 */
@Test public void testValidIdentifier() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  int exitCode = admin.run(
      new String[]{"-refresh", "localhost:" + cluster.getNameNodePort(), "firstHandler"});
  assertEquals("DFSAdmin should succeed", 0, exitCode);
  Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
  // The unrelated handler must never be touched.
  Mockito.verify(secondHandler, Mockito.never())
      .handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After -refreshCallQueue the call-queue implementation should be swapped:
 * no additional MockQueue instances are constructed and puts no longer
 * reach the MockQueue.
 */
@Test public void testRefresh() throws Exception {
  assertTrue("Mock queue should have been constructed", mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
  final int constructionsBeforeRefresh = mockQueueConstructions;
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int exitCode = dfsAdmin.run(new String[]{"-refreshCallQueue"});
  assertEquals("DFSAdmin should return 0", 0, exitCode);
  assertEquals("Mock queue should have no additional constructions",
      constructionsBeforeRefresh, mockQueueConstructions);
  try {
    assertFalse("Puts are routed through LBQ instead of MockQueue", canPutInMockQueue());
  } catch (IOException ioe) {
    fail("Could not put into queue at all");
  }
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts a 3-datanode MiniDFSCluster with the HDFS policy provider and
 * replication 1, then records the namenode URI, user name and file system.
 */
@Before @Override public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  dfsCluster.waitClusterUp();
  username = System.getProperty("user.name");
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts a 1-datanode MiniDFSCluster backed by a JavaKeyStore key provider
 * in a fresh temporary directory, creates a key, and records the namenode
 * URI, user name and file system.
 */
@Before @Override public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Unique keystore location per test run so runs do not interfere.
  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks");
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  username = System.getProperty("user.name");
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
}
APIUtilityVerifier TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts an 8-datanode MiniDFSCluster spread across four racks with fixed
 * hostnames, then records the namenode URI, user name and file system.
 */
@Before @Override public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  final String[] racks =
      {"/rack1", "/rack1", "/rack2", "/rack2", "/rack2", "/rack3", "/rack4", "/rack4"};
  final String[] hosts =
      {"host1", "host2", "host3", "host4", "host5", "host6", "host7", "host8"};
  dfsCluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(8).racks(racks).hosts(hosts).build();
  dfsCluster.waitClusterUp();
  username = System.getProperty("user.name");
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts a 1-datanode MiniDFSCluster with XAttrs enabled, then records the
 * namenode URI, user name and file system.
 */
@Before @Override public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  username = System.getProperty("user.name");
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * ConfServlet's "xml" output must be well-formed XML whose property list
 * contains TEST_KEY with value TEST_VAL.
 */
@Test public void testWriteXml() throws Exception {
  StringWriter writer = new StringWriter();
  ConfServlet.writeResponse(getTestConf(), writer, "xml");
  String xml = writer.toString();
  DocumentBuilder parser = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document document = parser.parse(new InputSource(new StringReader(xml)));
  NodeList names = document.getElementsByTagName("name");
  boolean sawTestKey = false;
  for (int idx = 0; idx < names.getLength(); idx++) {
    Node nameNode = names.item(idx);
    String key = nameNode.getTextContent();
    System.err.println("xml key: " + key);
    if (TEST_KEY.equals(key)) {
      sawTestKey = true;
      // The <value> sibling lives under the enclosing <property> element.
      Element property = (Element) nameNode.getParentNode();
      String value = property.getElementsByTagName("value").item(0).getTextContent();
      assertEquals(TEST_VAL, value);
    }
  }
  assertTrue(sawTestKey);
}
BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Iterating a Configuration should expose regular, deprecated and new keys,
 * with the deprecated/new pair reflecting the most recent value set via
 * either name.
 */
@Test public void testIteratorWithDeprecatedKeys(){
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK"});
  conf.set("k", "v");
  conf.set("dK", "V");
  // Setting the deprecated key is visible through both names.
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK"));
  // Setting the new key updates both views as well.
  conf.set("nK", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK"));
  boolean kFound = false;
  boolean dKFound = false;
  boolean nKFound = false;
  // Use the parameterized entry type instead of the raw Map.Entry.
  for (Map.Entry<String, String> entry : conf) {
    if (entry.getKey().equals("k")) {
      assertEquals("v", entry.getValue());
      kFound = true;
    }
    if (entry.getKey().equals("dK")) {
      assertEquals("VV", entry.getValue());
      dKFound = true;
    }
    if (entry.getKey().equals("nK")) {
      assertEquals("VV", entry.getValue());
      nKFound = true;
    }
  }
  assertTrue("regular Key not found", kFound);
  assertTrue("deprecated Key not found", dKFound);
  assertTrue("new Key not found", nKFound);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Run a set of threads making changes to the deprecations
* concurrently with another set of threads calling get()
* and set() on Configuration objects.
*/
@SuppressWarnings("deprecation") @Test(timeout=60000) public void testConcurrentDeprecateAndManipulate() throws Exception {
final int NUM_THREAD_IDS=10;
final int NUM_KEYS_PER_THREAD=1000;
ScheduledThreadPoolExecutor executor=new ScheduledThreadPoolExecutor(2 * NUM_THREAD_IDS,new ThreadFactoryBuilder().setDaemon(true).setNameFormat("testConcurrentDeprecateAndManipulate modification thread %d").build());
final CountDownLatch latch=new CountDownLatch(1);
final AtomicInteger highestModificationThreadId=new AtomicInteger(1);
List> futures=new LinkedList>();
for (int i=0; i < NUM_THREAD_IDS; i++) {
futures.add(executor.schedule(new Callable(){
@Override public Void call() throws Exception {
latch.await();
int threadIndex=highestModificationThreadId.addAndGet(1);
for (int i=0; i < NUM_KEYS_PER_THREAD; i++) {
String testKey=getTestKeyName(threadIndex,i);
String testNewKey=testKey + ".new";
Configuration.addDeprecations(new DeprecationDelta[]{new DeprecationDelta(testKey,testNewKey)});
}
return null;
}
}
,0,TimeUnit.SECONDS));
}
final AtomicInteger highestAccessThreadId=new AtomicInteger(1);
for (int i=0; i < NUM_THREAD_IDS; i++) {
futures.add(executor.schedule(new Callable(){
@Override public Void call() throws Exception {
Configuration conf=new Configuration();
latch.await();
int threadIndex=highestAccessThreadId.addAndGet(1);
for (int i=0; i < NUM_KEYS_PER_THREAD; i++) {
String testNewKey=getTestKeyName(threadIndex,i) + ".new";
String value="value." + threadIndex + "."+ i;
conf.set(testNewKey,value);
Assert.assertEquals(value,conf.get(testNewKey));
}
return null;
}
}
,0,TimeUnit.SECONDS));
}
latch.countDown();
for ( Future future : futures) {
Uninterruptibles.getUninterruptibly(future);
}
}
InternalCallVerifier EqualityVerifier
/**
 * A value set under the old key before the deprecation is declared must be
 * readable through the new key afterwards.
 */
@Test public void testSetBeforeAndGetAfterDeprecation(){
  Configuration config = new Configuration();
  config.set("oldkey", "hello");
  Configuration.addDeprecation("oldkey", new String[]{"newkey"});
  assertEquals("hello", config.get("newkey"));
}
InternalCallVerifier EqualityVerifier
/**
 * Same as testSetBeforeAndGetAfterDeprecation, but for a key pair that has
 * a default value configured.
 */
@Test public void testSetBeforeAndGetAfterDeprecationAndDefaults(){
  Configuration config = new Configuration();
  config.set("tests.fake-default.old-key", "hello");
  Configuration.addDeprecation("tests.fake-default.old-key",
      new String[]{"tests.fake-default.new-key"});
  assertEquals("hello", config.get("tests.fake-default.new-key"));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * This test is to ensure the correctness of loading of keys with respect to
 * being marked as final and that are related to deprecation.
 * @throws IOException
 */
@Test public void testDeprecationForFinalParameters() throws IOException {
addDeprecationToConfiguration();
// First resource: A and H are marked final; J is final with an empty value.
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("A","a",true);
appendProperty("D","d");
appendProperty("E","e");
appendProperty("H","h",true);
appendProperty("J","",true);
endConfig();
Path fileResource=new Path(CONFIG);
conf.addResource(fileResource);
// Each key pair (A/B, C/D, E/F, G/H, I/J) reflects the value just loaded.
// NOTE(review): the pairing is presumably established by
// addDeprecationToConfiguration() — confirm against that helper.
assertEquals("a",conf.get("A"));
assertEquals("a",conf.get("B"));
assertEquals("d",conf.get("C"));
assertEquals("d",conf.get("D"));
assertEquals("e",conf.get("E"));
assertEquals("e",conf.get("F"));
assertEquals("h",conf.get("G"));
assertEquals("h",conf.get("H"));
assertNull(conf.get("I"));
assertNull(conf.get("J"));
// Second resource sets the counterpart keys; C and F are final here.
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("B","b");
appendProperty("C","c",true);
appendProperty("F","f",true);
appendProperty("G","g");
appendProperty("I","i");
endConfig();
Path fileResource1=new Path(CONFIG2);
conf.addResource(fileResource1);
// Final keys from the first resource (A, H, J) keep their values; the
// non-final pairs pick up the newly loaded values.
assertEquals("a",conf.get("A"));
assertEquals("a",conf.get("B"));
assertEquals("c",conf.get("C"));
assertEquals("c",conf.get("D"));
assertEquals("f",conf.get("E"));
assertEquals("f",conf.get("F"));
assertEquals("h",conf.get("G"));
assertEquals("h",conf.get("H"));
assertNull(conf.get("I"));
assertNull(conf.get("J"));
// Third resource writes fresh values for every key...
out=new BufferedWriter(new FileWriter(CONFIG3));
startConfig();
appendProperty("A","a1");
appendProperty("B","b1");
appendProperty("C","c1");
appendProperty("D","d1");
appendProperty("E","e1");
appendProperty("F","f1");
appendProperty("G","g1");
appendProperty("H","h1");
appendProperty("I","i1");
appendProperty("J","j1");
endConfig();
// NOTE(review): CONFIG (not CONFIG3) is re-added here, so the CONFIG3
// values written above are never loaded. If the intent was to verify that
// final markers survive a CONFIG3 load, this should likely be
// new Path(CONFIG3) — confirm.
fileResource=new Path(CONFIG);
conf.addResource(fileResource);
// All values are unchanged from the previous step.
assertEquals("a",conf.get("A"));
assertEquals("a",conf.get("B"));
assertEquals("c",conf.get("C"));
assertEquals("c",conf.get("D"));
assertEquals("f",conf.get("E"));
assertEquals("f",conf.get("F"));
assertEquals("h",conf.get("G"));
assertEquals("h",conf.get("H"));
assertNull(conf.get("I"));
assertNull(conf.get("J"));
}
InternalCallVerifier EqualityVerifier
/**
 * This test checks the correctness of loading/setting the properties in terms
 * of occurrence of deprecated keys.
 * @throws IOException
 */
@Test public void testDeprecation() throws IOException {
addDeprecationToConfiguration();
// Load a resource that sets both single-alias (A, D) and multi-alias (P)
// deprecated keys.
out=new BufferedWriter(new FileWriter(CONFIG));
startConfig();
appendProperty("A","a");
appendProperty("D","d");
appendProperty("P","p");
endConfig();
Path fileResource=new Path(CONFIG);
conf.addResource(fileResource);
// P's value is visible through all of its aliases (Q, R), and A/D through
// their counterparts B/C. NOTE(review): the alias wiring is presumably set
// up by addDeprecationToConfiguration() — confirm against that helper.
assertEquals("p",conf.get("P"));
assertEquals("p",conf.get("Q"));
assertEquals("p",conf.get("R"));
assertEquals("a",conf.get("A"));
assertEquals("a",conf.get("B"));
assertEquals("d",conf.get("C"));
assertEquals("d",conf.get("D"));
// A second resource setting the counterpart keys overrides both views.
out=new BufferedWriter(new FileWriter(CONFIG2));
startConfig();
appendProperty("B","b");
appendProperty("C","c");
endConfig();
Path fileResource1=new Path(CONFIG2);
conf.addResource(fileResource1);
assertEquals("b",conf.get("A"));
assertEquals("b",conf.get("B"));
assertEquals("c",conf.get("C"));
assertEquals("c",conf.get("D"));
// Programmatic set() through either side of the M/N pair updates both.
conf.set("N","n");
assertEquals("n",conf.get("M"));
assertEquals(conf.get("M"),conf.get("N"));
conf.set("M","m");
assertEquals("m",conf.get("N"));
// X has two aliases (Y, Z): setting X exposes the value through all three;
// the last set() through any alias wins for every view.
conf.set("X","x");
assertEquals("x",conf.get("X"));
assertEquals("x",conf.get("Y"));
assertEquals("x",conf.get("Z"));
conf.set("Y","y");
conf.set("Z","z");
assertEquals("z",conf.get("X"));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Unsetting either the deprecated key or the new key must clear the value
 * as seen through both names.
 */
@Test public void testUnsetWithDeprecatedKeys(){
  Configuration config = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK"});
  config.set("nK", "VV");
  assertEquals("VV", config.get("dK"));
  assertEquals("VV", config.get("nK"));
  // Unset via the deprecated name.
  config.unset("dK");
  assertNull(config.get("dK"));
  assertNull(config.get("nK"));
  config.set("nK", "VV");
  assertEquals("VV", config.get("dK"));
  assertEquals("VV", config.get("nK"));
  // Unset via the new name.
  config.unset("nK");
  assertNull(config.get("dK"));
  assertNull(config.get("nK"));
}
InternalCallVerifier EqualityVerifier
/**
 * set() followed by unset() through deprecated aliases: X and Y presumably
 * both deprecate to Z (per addDeprecationToConfiguration — confirm), so
 * unsetting Y clears Z and X as well.
 */
@Test public void testDeprecationSetUnset() throws IOException {
  addDeprecationToConfiguration();
  Configuration conf = new Configuration();
  conf.set("Y", "y");
  assertEquals("y", conf.get("Z"));
  conf.set("X", "x");
  assertEquals("x", conf.get("Z"));
  conf.unset("Y");
  // Use assertNull rather than assertEquals(null, ...) for clarity.
  assertNull(conf.get("Z"));
  assertNull(conf.get("X"));
}
BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A deprecated key mapped to two new keys: setting any of the three names
 * must update the value seen through all of them, and iteration must expose
 * every key with the final value.
 */
@Test public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys(){
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[]{"nK1", "nK2"});
  conf.set("k", "v");
  conf.set("dK", "V");
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK1"));
  assertEquals("V", conf.get("nK2"));
  conf.set("nK1", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK1"));
  assertEquals("VV", conf.get("nK2"));
  conf.set("nK2", "VVV");
  assertEquals("VVV", conf.get("dK"));
  assertEquals("VVV", conf.get("nK2"));
  assertEquals("VVV", conf.get("nK1"));
  boolean kFound = false;
  boolean dKFound = false;
  boolean nK1Found = false;
  boolean nK2Found = false;
  // Use the parameterized entry type instead of the raw Map.Entry.
  for (Map.Entry<String, String> entry : conf) {
    if (entry.getKey().equals("k")) {
      assertEquals("v", entry.getValue());
      kFound = true;
    }
    if (entry.getKey().equals("dK")) {
      assertEquals("VVV", entry.getValue());
      dKFound = true;
    }
    if (entry.getKey().equals("nK1")) {
      assertEquals("VVV", entry.getValue());
      nK1Found = true;
    }
    if (entry.getKey().equals("nK2")) {
      assertEquals("VVV", entry.getValue());
      nK2Found = true;
    }
  }
  assertTrue("regular Key not found", kFound);
  assertTrue("deprecated Key not found", dKFound);
  assertTrue("new Key 1 not found", nK1Found);
  assertTrue("new Key 2 not found", nK2Found);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Default profile params must be non-null, contain a file=%s placeholder,
 * and use the hprof agent.
 */
@Test public void testProfileParamsDefaults(){
  JobConf jobConf = new JobConf();
  String params = jobConf.getProfileParams();
  Assert.assertNotNull(params);
  Assert.assertTrue(params.contains("file=%s"));
  Assert.assertTrue(params.startsWith("-agentlib:hprof"));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that negative values for new configuration keys get passed through.
 */
@Test public void testNegativeValuesForMemoryParams(){
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.MAP_MEMORY_MB, "-5");
  jobConf.set(MRJobConfig.REDUCE_MEMORY_MB, "-6");
  Assert.assertEquals(-5, jobConf.getMemoryForMapTask());
  Assert.assertEquals(-6, jobConf.getMemoryForReduceTask());
}
InternalCallVerifier EqualityVerifier
/**
 * Test deprecated accessor and mutator method for mapred.task.maxvmem.
 */
@Test public void testMaxVirtualMemoryForTask(){
  // JUnit convention: assertEquals(expected, actual) — the original had the
  // arguments reversed, which produces misleading failure messages.
  JobConf configuration = new JobConf();
  // With only one positive map/reduce memory value, maxvmem derives from it
  // (MB converted to bytes).
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
  Assert.assertEquals(300 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());
  configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(200));
  Assert.assertEquals(200 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());
  // An explicit mapred.task.maxvmem wins over unset map/reduce values.
  configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
  configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
  Assert.assertEquals(1 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
  Assert.assertEquals(1 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());
  // The deprecated mutator back-fills both memory values (bytes to MB).
  configuration = new JobConf();
  configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
  configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(400));
  configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
}
InternalCallVerifier EqualityVerifier
/**
 * Test that negative values for MAPRED_TASK_MAXVMEM_PROPERTY cause
 * new configuration keys' values to be used.
 */
@Test public void testNegativeValueForTaskVmem(){
  JobConf jobConf = new JobConf();
  jobConf.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3");
  // With maxvmem negative, the defaults for the new keys apply.
  Assert.assertEquals(MRJobConfig.DEFAULT_MAP_MEMORY_MB, jobConf.getMemoryForMapTask());
  Assert.assertEquals(MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, jobConf.getMemoryForReduceTask());
  // Explicit new-key values take over once set.
  jobConf.set(MRJobConfig.MAP_MEMORY_MB, "4");
  jobConf.set(MRJobConfig.REDUCE_MEMORY_MB, "5");
  Assert.assertEquals(4, jobConf.getMemoryForMapTask());
  Assert.assertEquals(5, jobConf.getMemoryForReduceTask());
}
InternalCallVerifier EqualityVerifier
/**
 * setProfileParams must store its value under TASK_PROFILE_PARAMS.
 */
@Test public void testProfileParamsSetter(){
  JobConf jobConf = new JobConf();
  jobConf.setProfileParams("test");
  Assert.assertEquals("test", jobConf.get(MRJobConfig.TASK_PROFILE_PARAMS));
}
InternalCallVerifier EqualityVerifier
/**
 * getProfileParams must read its value from TASK_PROFILE_PARAMS.
 */
@Test public void testProfileParamsGetter(){
  JobConf jobConf = new JobConf();
  jobConf.set(MRJobConfig.TASK_PROFILE_PARAMS, "test");
  Assert.assertEquals("test", jobConf.getProfileParams());
}
InternalCallVerifier EqualityVerifier
/**
 * Testing mapred.task.maxvmem replacement with new values.
 */
@Test public void testMemoryConfigForMapOrReduceTask(){
  // JUnit convention: assertEquals(expected, actual) — the original had the
  // arguments reversed, which produces misleading failure messages.
  JobConf configuration = new JobConf();
  // New-style keys alone are honored as-is (values in MB).
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(300));
  Assert.assertEquals(300, configuration.getMemoryForMapTask());
  Assert.assertEquals(300, configuration.getMemoryForReduceTask());
  // A positive mapred.task.maxvmem (bytes) overrides both MB settings.
  configuration.set("mapred.task.maxvmem", String.valueOf(2 * 1024 * 1024));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(300));
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
  // maxvmem of -1 means "unset": the new keys win.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", "-1");
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(400));
  Assert.assertEquals(300, configuration.getMemoryForMapTask());
  Assert.assertEquals(400, configuration.getMemoryForReduceTask());
  // maxvmem set with new keys unset: maxvmem still applies.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(2 * 1024 * 1024));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "-1");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-1");
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
  // Everything unset: both report -1.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(-1));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "-1");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-1");
  Assert.assertEquals(-1, configuration.getMemoryForMapTask());
  Assert.assertEquals(-1, configuration.getMemoryForReduceTask());
  // maxvmem overrides even small positive new-key values.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(2 * 1024 * 1024));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "3");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "3");
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
}
InternalCallVerifier BooleanVerifier
/**
 * Test reconfiguring a Reconfigurable: verifies the initial configuration
 * values, which properties report as reconfigurable, and that individual
 * reconfigurations succeed or fail (with ReconfigurationException) as
 * expected. The nine duplicated try/catch blocks of the original are
 * factored into two helpers below.
 */
@Test public void testReconfigure(){
  ReconfigurableDummy dummy = new ReconfigurableDummy(conf1);
  // Initial values from conf1.
  assertTrue(PROP1 + " set to wrong value ", dummy.getConf().get(PROP1).equals(VAL1));
  assertTrue(PROP2 + " set to wrong value ", dummy.getConf().get(PROP2).equals(VAL1));
  assertTrue(PROP3 + " set to wrong value ", dummy.getConf().get(PROP3).equals(VAL1));
  assertTrue(PROP4 + " set to wrong value ", dummy.getConf().get(PROP4) == null);
  assertTrue(PROP5 + " set to wrong value ", dummy.getConf().get(PROP5) == null);
  // Which properties the dummy declares reconfigurable.
  assertTrue(PROP1 + " should be reconfigurable ", dummy.isPropertyReconfigurable(PROP1));
  assertTrue(PROP2 + " should be reconfigurable ", dummy.isPropertyReconfigurable(PROP2));
  assertFalse(PROP3 + " should not be reconfigurable ", dummy.isPropertyReconfigurable(PROP3));
  assertTrue(PROP4 + " should be reconfigurable ", dummy.isPropertyReconfigurable(PROP4));
  assertFalse(PROP5 + " should not be reconfigurable ", dummy.isPropertyReconfigurable(PROP5));
  // Reconfigurable properties accept new values, including null (unset).
  reconfigureExpectingSuccess(dummy, PROP1, VAL1);
  reconfigureExpectingSuccess(dummy, PROP1, null);
  reconfigureExpectingSuccess(dummy, PROP1, VAL2);
  reconfigureExpectingSuccess(dummy, PROP4, null);
  reconfigureExpectingSuccess(dummy, PROP4, VAL1);
  // Non-reconfigurable properties must be rejected.
  reconfigureExpectingFailure(dummy, PROP5, null);
  reconfigureExpectingFailure(dummy, PROP5, VAL1);
  reconfigureExpectingFailure(dummy, PROP3, VAL2);
  reconfigureExpectingFailure(dummy, PROP3, null);
}

/**
 * Reconfigures {@code prop} to {@code value} (null means unset) and asserts
 * the new value took effect; fails the test if a ReconfigurationException
 * is thrown.
 */
private static void reconfigureExpectingSuccess(ReconfigurableDummy dummy,
    String prop, String value) {
  try {
    dummy.reconfigureProperty(prop, value);
    String actual = dummy.getConf().get(prop);
    assertTrue(prop + " set to wrong value ",
        value == null ? actual == null : value.equals(actual));
  } catch (ReconfigurationException e) {
    fail("received unexpected exception");
  }
}

/**
 * Attempts to reconfigure {@code prop} and asserts that a
 * ReconfigurationException is thrown.
 */
private static void reconfigureExpectingFailure(ReconfigurableDummy dummy,
    String prop, String value) {
  boolean exceptionCaught = false;
  try {
    dummy.reconfigureProperty(prop, value);
  } catch (ReconfigurationException e) {
    exceptionCaught = true;
  }
  assertTrue("did not receive expected exception", exceptionCaught);
}
InternalCallVerifier BooleanVerifier
/**
 * Test whether configuration changes are visible in another thread.
 */
@Test public void testThread() throws ReconfigurationException {
  ReconfigurableDummy dummy = new ReconfigurableDummy(conf1);
  assertTrue(dummy.getConf().get(PROP1).equals(VAL1));
  Thread worker = new Thread(dummy);
  worker.start();
  // Give the worker a moment to start before reconfiguring.
  try {
    Thread.sleep(500);
  } catch (InterruptedException ignore) {
  }
  dummy.reconfigureProperty(PROP1, VAL2);
  // The worker is expected to observe the change and exit within 2s.
  long deadline = Time.now() + 2000;
  while (worker.isAlive() && Time.now() < deadline) {
    try {
      Thread.sleep(50);
    } catch (InterruptedException ignore) {
    }
  }
  assertFalse("dummy thread should not be alive", worker.isAlive());
  dummy.running = false;
  try {
    worker.join();
  } catch (InterruptedException ignore) {
  }
  assertTrue(PROP1 + " is set to wrong value", dummy.getConf().get(PROP1).equals(VAL2));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK fail is not
 * available on the standby.
 */
@Test public void testFailoverWithFailingBKCluster() throws Exception {
// The quorum/ensemble size requires every bookie, so losing one bookie
// makes the shared journal unwritable.
int ensembleSize=numBookies + 1;
BookieServer newBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
BookieServer replacementBookie=null;
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
BKJMUtil.addJournalManagerDefinition(conf);
// checkExitOnShutdown(false) so the NN's deliberate exit below doesn't
// abort the test JVM.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).manageNameDfsSharedDirs(false).checkExitOnShutdown(false).build();
// NOTE(review): nn1/nn2 are never used below — candidates for removal.
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
cluster.waitActive();
cluster.transitionToActive(0);
Path p1=new Path("/testBKJMFailingBKCluster1");
Path p2=new Path("/testBKJMFailingBKCluster2");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// p1 is written while BK is healthy; it must survive the failover.
fs.mkdirs(p1);
// Take down the extra bookie, making the required journal unavailable.
newBookie.shutdown();
assertEquals("New bookie didn't stop",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
// With the journal gone, the active NN should exit rather than accept
// the write; the client sees it as a RemoteException.
fs.mkdirs(p2);
fail("mkdirs should result in the NN exiting");
}
catch ( RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
}
cluster.shutdownNameNode(0);
try {
// The standby also cannot become active while bookies are down.
cluster.transitionToActive(1);
fail("Shouldn't have been able to transition with bookies down");
}
catch ( ExitException ee) {
assertTrue("Should shutdown due to required journal failure",ee.getMessage().contains("starting log segment 3 failed for required journal"));
}
// Restore BK capacity; the standby can now take over.
replacementBookie=bkutil.newBookie();
assertEquals("Replacement bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
cluster.transitionToActive(1);
// Pre-failure write is visible; the failed write never made it.
assertTrue(fs.exists(p1));
assertFalse(fs.exists(p2));
}
finally {
newBookie.shutdown();
if (replacementBookie != null) {
replacementBookie.shutdown();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Use NameNode INITIALIZESHAREDEDITS to initialize the shared edits. i.e. copy
 * the edits log segments to new bkjm shared edits.
 * @throws Exception
 */
@Test public void testInitializeBKSharedEdits() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
HAUtil.setAllowStandbyReads(conf,true);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
// Start an ordinary (file-based shared edits) HA cluster first.
MiniDFSNNTopology topology=MiniDFSNNTopology.simpleHATopology();
cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
cluster.waitActive();
cluster.shutdownNameNodes();
// Destroy the file-based shared edits dir; without it neither NN can start.
File shareddir=new File(cluster.getSharedEditsDir(0,1));
assertTrue("Initial Shared edits dir not fully deleted",FileUtil.fullyDelete(shareddir));
assertCanNotStartNamenode(cluster,0);
assertCanNotStartNamenode(cluster,1);
// Re-point both NNs at a fresh BKJM shared-edits URI.
Configuration nn1Conf=cluster.getConfiguration(0);
Configuration nn2Conf=cluster.getConfiguration(1);
nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/initializeSharedEdits").toString());
nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,BKJMUtil.createJournalURI("/initializeSharedEdits").toString());
BKJMUtil.addJournalManagerDefinition(nn1Conf);
BKJMUtil.addJournalManagerDefinition(nn2Conf);
// initializeSharedEdits returns false on success (no error occurred).
assertFalse(NameNode.initializeSharedEdits(nn1Conf));
// Both NNs should now come up against the BKJM shared edits.
assertCanStartHANameNodes(cluster,conf,"/testBKJMInitialize");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Simple HA failover with BookKeeper as the shared edits journal: write a
 * directory while NN0 is active, kill NN0, fail over to NN1, and verify the
 * directory is still visible through the failover filesystem.
 */
@Test public void testFailoverWithBK() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/hotfailover").toString());
    BKJMUtil.addJournalManagerDefinition(conf);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .manageNameDfsSharedDirs(false)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
    Path p = new Path("/testBKJMfailover");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    fs.mkdirs(p);
    // Kill the active NN and fail over; the mkdir edit must be readable
    // from the shared BKJM journal by the newly active NN.
    // (Removed unused locals nn1/nn2 that only aliased getNameNode().)
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    assertTrue(fs.exists(p));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier NullVerifier
/**
 * BKJM must create the default bookie-available znode when
 * 'dfs.namenode.bookkeeperjournal.zk.availablebookies' is not configured.
 */
@Test public void testDefaultBKAvailablePath() throws Exception {
  // No available-bookies key set: the default path should be used.
  Configuration emptyConf = new Configuration();
  Assert.assertNull(BK_ROOT_PATH + " already exists",
      zkc.exists(BK_ROOT_PATH, false));
  NamespaceInfo namespace = newNSInfo();
  bkjm = new BookKeeperJournalManager(emptyConf,
      URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-DefaultBKPath"),
      namespace);
  // Formatting the journal should create the default available path.
  bkjm.format(namespace);
  Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH + " doesn't exists",
      zkc.exists(BK_ROOT_PATH, false));
}
InternalCallVerifier NullVerifier
/**
 * BKJM must create the bookie-available path configured via
 * 'dfs.namenode.bookkeeperjournal.zk.availablebookies'.
 */
@Test public void testWithConfiguringBKAvailablePath() throws Exception {
  // Use the default path value, but supply it explicitly via the config key.
  String bkAvailablePath = BookKeeperJournalManager.BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT;
  Configuration configuration = new Configuration();
  configuration.setStrings(BookKeeperJournalManager.BKJM_ZK_LEDGERS_AVAILABLE_PATH,
      bkAvailablePath);
  Assert.assertNull(bkAvailablePath + " already exists",
      zkc.exists(bkAvailablePath, false));
  NamespaceInfo namespace = newNSInfo();
  bkjm = new BookKeeperJournalManager(configuration,
      URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-WithBKPath"),
      namespace);
  // Formatting the journal should create the configured available path.
  bkjm.format(namespace);
  Assert.assertNotNull("Bookie available path : " + bkAvailablePath + " doesn't exists",
      zkc.exists(bkAvailablePath, false));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that if enough bookies fail to prevent an ensemble forming,
 * writes to bookkeeper will fail. Also test that once an ensemble is
 * available again, writing can continue.
 */
@Test public void testAllBookieFailure() throws Exception {
// Extra bookie so an ensemble of numBookies+1 is initially possible.
BookieServer bookieToFail=bkutil.newBookie();
BookieServer replacementBookie=null;
try {
int ensembleSize=numBookies + 1;
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
// Require the full ensemble for every write, so losing even one bookie
// makes further writes impossible.
Configuration conf=new Configuration();
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
long txid=1;
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),nsi);
bkjm.format(nsi);
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Writes succeed while all ensemble bookies are up.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
// Kill one bookie; the required ensemble can no longer be satisfied.
bookieToFail.shutdown();
assertEquals("New bookie didn't die",numBookies,bkutil.checkBookiesUp(numBookies,10));
try {
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
// Expected to fail now that the ensemble is short a bookie.
out.flush();
fail("should not get to this stage");
}
catch ( IOException ioe) {
LOG.debug("Error writing to bookkeeper",ioe);
assertTrue("Invalid exception message",ioe.getMessage().contains("Failed to write to bookkeeper"));
}
// Bring a replacement up; after recovery a fresh segment must be writable.
replacementBookie=bkutil.newBookie();
assertEquals("New bookie didn't start",numBookies + 1,bkutil.checkBookiesUp(numBookies + 1,10));
bkjm.recoverUnfinalizedSegments();
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
}
catch ( Exception e) {
// Log before rethrowing so the failure context is captured in the test log.
LOG.error("Exception in test",e);
throw e;
}
finally {
if (replacementBookie != null) {
replacementBookie.shutdown();
}
bookieToFail.shutdown();
if (bkutil.checkBookiesUp(numBookies,30) != numBookies) {
LOG.warn("Not all bookies from this test shut down, expect errors");
}
}
}
InternalCallVerifier NullVerifier
/**
 * Write an unfinalized segment, abort it, and verify that segment recovery
 * turns the inprogress znode into the corresponding finalized-ledger znode.
 */
@Test public void testSimpleRecovery() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"), nsi);
  bkjm.format(nsi);
  // (Removed a stray empty statement ';' that followed this call.)
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.setReadyToFlush();
  out.flush();
  // Abort instead of finalizing, leaving only an inprogress znode behind.
  out.abort();
  out.close();
  assertNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNotNull(zkc.exists(bkjm.inprogressZNode(1), false));
  // Recovery should finalize the abandoned segment.
  bkjm.recoverUnfinalizedSegments();
  assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(1, 100), false));
  assertNull(zkc.exists(bkjm.inprogressZNode(1), false));
}
IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getNumberOfTransactions with inProgressOk=true must count transactions in
 * a trailing, aborted (inprogress) segment as well as finalized ones.
 */
@Test public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
  NamespaceInfo nsInfo = newNSInfo();
  BookKeeperJournalManager journal = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsInfo);
  journal.format(nsInfo);
  long nextTxId = 1;
  // Lay down three fully finalized segments.
  for (long segment = 0; segment < 3; segment++) {
    long firstTxId = nextTxId;
    EditLogOutputStream stream = journal.startLogSegment(firstTxId,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long written = 1; written <= DEFAULT_SEGMENT_SIZE; written++) {
      FSEditLogOp noOp = FSEditLogTestUtil.getNoOpInstance();
      noOp.setTransactionId(nextTxId++);
      stream.write(noOp);
    }
    stream.close();
    journal.finalizeLogSegment(firstTxId, (nextTxId - 1));
    assertNotNull(
        zkc.exists(journal.finalizedLedgerZNode(firstTxId, (nextTxId - 1)), false));
  }
  // Follow up with a half-written segment that is aborted, not finalized.
  long firstTxId = nextTxId;
  EditLogOutputStream stream = journal.startLogSegment(firstTxId,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long written = 1; written <= DEFAULT_SEGMENT_SIZE / 2; written++) {
    FSEditLogOp noOp = FSEditLogTestUtil.getNoOpInstance();
    noOp.setTransactionId(nextTxId++);
    stream.write(noOp);
  }
  stream.setReadyToFlush();
  stream.flush();
  stream.abort();
  stream.close();
  // All written transactions, including the inprogress tail, are counted.
  long numTrans = journal.getNumberOfTransactions(1, true);
  assertEquals((nextTxId - 1), numTrans);
}
InternalCallVerifier EqualityVerifier
/**
 * getNumberOfTransactions over a single finalized 100-transaction segment
 * must report exactly 100.
 */
@Test public void testNumberOfTransactions() throws Exception {
  NamespaceInfo nsInfo = newNSInfo();
  BookKeeperJournalManager journal = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsInfo);
  journal.format(nsInfo);
  EditLogOutputStream stream = journal.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  // Write txids 1..100 into one segment.
  for (long txId = 1; txId <= 100; txId++) {
    FSEditLogOp noOp = FSEditLogTestUtil.getNoOpInstance();
    noOp.setTransactionId(txId);
    stream.write(noOp);
  }
  stream.close();
  journal.finalizeLogSegment(1, 100);
  long numTrans = journal.getNumberOfTransactions(1, true);
  assertEquals(100, numTrans);
}
IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getNumberOfTransactions across finalized segments with a deleted middle
 * segment: counting stops at the gap, counting from inside the gap raises
 * CorruptionException, and counting past the gap succeeds.
 */
@Test public void testNumberOfTransactionsWithGaps() throws Exception {
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-gaps"),nsi);
bkjm.format(nsi);
long txid=1;
// Write three consecutive finalized segments of DEFAULT_SEGMENT_SIZE each.
for (long i=0; i < 3; i++) {
long start=txid;
EditLogOutputStream out=bkjm.startLogSegment(start,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.close();
bkjm.finalizeLogSegment(start,txid - 1);
assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(start,txid - 1),false));
}
// Delete the middle segment's znode, creating a gap in the txid sequence.
zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE + 1,DEFAULT_SEGMENT_SIZE * 2),-1);
// Counting from txid 1 only reaches the end of the first segment.
long numTrans=bkjm.getNumberOfTransactions(1,true);
assertEquals(DEFAULT_SEGMENT_SIZE,numTrans);
try {
// Starting inside the gap must be reported as corruption.
numTrans=bkjm.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE + 1,true);
fail("Should have thrown corruption exception by this point");
}
catch ( JournalManager.CorruptionException ce) {
// expected: the segment starting at this txid was deleted above
}
// Counting from just after the gap sees the intact third segment.
numTrans=bkjm.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE * 2) + 1,true);
assertEquals(DEFAULT_SEGMENT_SIZE,numTrans);
}
InternalCallVerifier NullVerifier
/**
 * Basic write path: write one segment, finalize it, and check the expected
 * znodes — finalized present, inprogress gone.
 */
@Test public void testSimpleWrite() throws Exception {
  NamespaceInfo nsInfo = newNSInfo();
  BookKeeperJournalManager journal = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsInfo);
  journal.format(nsInfo);
  EditLogOutputStream stream = journal.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  // Write 100 no-op transactions into a single segment.
  for (long txId = 1; txId <= 100; txId++) {
    FSEditLogOp noOp = FSEditLogTestUtil.getNoOpInstance();
    noOp.setTransactionId(txId);
    stream.write(noOp);
  }
  stream.close();
  journal.finalizeLogSegment(1, 100);
  // Finalizing replaces the inprogress znode with a finalized-ledger znode.
  String finalizedPath = journal.finalizedLedgerZNode(1, 100);
  assertNotNull(zkc.exists(finalizedPath, false));
  assertNull(zkc.exists(journal.inprogressZNode(1), false));
}
InternalCallVerifier NullVerifier
/**
 * Simulate a crash between finalizing an inprogress segment and deleting the
 * corresponding inprogress znode: re-running recovery with a stale inprogress
 * znode (whose segment is already finalized) must succeed.
 */
@Test public void testRefinalizeAlreadyFinalizedInprogress() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-refinalizeInprogressLedger");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.format(nsi);
  // (Removed a stray empty statement ';' that followed this call.)
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  // Close without finalizing, so only the inprogress znode exists.
  out.close();
  bkjm.close();
  String inprogressZNode = bkjm.inprogressZNode(1);
  String finalizedZNode = bkjm.finalizedLedgerZNode(1, 100);
  assertNotNull("inprogress znode doesn't exist", zkc.exists(inprogressZNode, null));
  assertNull("finalized znode exists", zkc.exists(finalizedZNode, null));
  // Keep a copy of the inprogress metadata so it can be resurrected later.
  byte[] inprogressData = zkc.getData(inprogressZNode, false, null);
  // First recovery finalizes the segment and removes the inprogress znode.
  bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.recoverUnfinalizedSegments();
  bkjm.close();
  assertNull("inprogress znode exists", zkc.exists(inprogressZNode, null));
  assertNotNull("finalized znode doesn't exist", zkc.exists(finalizedZNode, null));
  // Recreate the stale inprogress znode, mimicking a crash after
  // finalization but before znode deletion; recovery must tolerate it.
  zkc.create(inprogressZNode, inprogressData, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
  bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.recoverUnfinalizedSegments();
  bkjm.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Reading edit log file metadata from ZooKeeper must tolerate a
 * NoNodeException for a ledger znode that has vanished: getLedgerList should
 * suppress the exception and return the remaining ledgers. HDFS-3441.
 */
@Test public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-editlogfile");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.format(nsi);
  try {
    String zkpath1 = startAndFinalizeLogSegment(bkjm, 1, 50);
    String zkpath2 = startAndFinalizeLogSegment(bkjm, 51, 100);
    // Make reads of the second ledger's metadata fail as if it was deleted.
    ZooKeeper zkspy = spy(BKJMUtil.connectZooKeeper());
    bkjm.setZooKeeper(zkspy);
    Mockito.doThrow(new KeeperException.NoNodeException(zkpath2 + " doesn't exists"))
        .when(zkspy).getData(zkpath2, false, null);
    // Raw List replaced with the parameterized type so get(0).getZkPath()
    // compiles without a cast.
    List<EditLogLedgerMetadata> ledgerList = bkjm.getLedgerList(false);
    assertEquals("List contains the metadata of non exists path.", 1, ledgerList.size());
    assertEquals("LogLedgerMetadata contains wrong zk paths.", zkpath1,
        ledgerList.get(0).getZkPath());
  } finally {
    bkjm.close();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test that a BookKeeper JM can continue to work across the
 * failure of a bookie. This should be handled transparently
 * by bookkeeper.
 */
@Test public void testOneBookieFailure() throws Exception {
BookieServer bookieToFail=bkutil.newBookie();
BookieServer replacementBookie=null;
try {
int ensembleSize=numBookies + 1;
assertEquals("New bookie didn't start",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
// Require the full ensemble for writes.
Configuration conf=new Configuration();
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,ensembleSize);
conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,ensembleSize);
long txid=1;
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"),nsi);
bkjm.format(nsi);
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
// Start the replacement BEFORE failing a bookie, so bookkeeper has a
// spare to swap into the ensemble without losing capacity.
replacementBookie=bkutil.newBookie();
assertEquals("replacement bookie didn't start",ensembleSize + 1,bkutil.checkBookiesUp(ensembleSize + 1,10));
bookieToFail.shutdown();
assertEquals("New bookie didn't die",ensembleSize,bkutil.checkBookiesUp(ensembleSize,10));
// Writes on the same stream should keep succeeding across the failure.
for (long i=1; i <= 3; i++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.setReadyToFlush();
out.flush();
}
catch ( Exception e) {
// Log before rethrowing so the failure context is captured in the test log.
LOG.error("Exception in test",e);
throw e;
}
finally {
if (replacementBookie != null) {
replacementBookie.shutdown();
}
bookieToFail.shutdown();
if (bkutil.checkBookiesUp(numBookies,30) != numBookies) {
LOG.warn("Not all bookies from this test shut down, expect errors");
}
}
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Create a bkjm namespace, write a journal from txid 1, close stream.
 * Try to create a new journal from txid 1. Should throw an exception.
 * Restarting from any txid inside the finalized range must also fail;
 * starting at the next fresh txid (or beyond) must succeed.
 */
@Test public void testWriteRestartFrom1() throws Exception {
NamespaceInfo nsi=newNSInfo();
BookKeeperJournalManager bkjm=new BookKeeperJournalManager(conf,BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"),nsi);
bkjm.format(nsi);
long txid=1;
long start=txid;
EditLogOutputStream out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.close();
bkjm.finalizeLogSegment(start,(txid - 1));
// Restarting from txid 1 overlaps the finalized segment and must fail.
txid=1;
try {
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Shouldn't be able to start another journal from " + txid + " when one already exists");
}
catch ( Exception ioe) {
// NOTE(review): this catch is broader (Exception) than the IOException
// catch below for the same scenario — consider narrowing for consistency.
LOG.info("Caught exception as expected",ioe);
}
// Starting in the middle of the finalized range must also fail.
txid=DEFAULT_SEGMENT_SIZE;
try {
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Shouldn't be able to start another journal from " + txid + " when one already exists");
}
catch ( IOException ioe) {
LOG.info("Caught exception as expected",ioe);
}
// The first unused txid is a legal starting point for a new segment.
txid=DEFAULT_SEGMENT_SIZE + 1;
start=txid;
out=bkjm.startLogSegment(start,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertNotNull(out);
for (long j=1; j <= DEFAULT_SEGMENT_SIZE; j++) {
FSEditLogOp op=FSEditLogTestUtil.getNoOpInstance();
op.setTransactionId(txid++);
out.write(op);
}
out.close();
bkjm.finalizeLogSegment(start,(txid - 1));
// Skipping ahead past a gap is also allowed for a new segment.
txid=DEFAULT_SEGMENT_SIZE * 4;
out=bkjm.startLogSegment(txid,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertNotNull(out);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * While bootstrapping, in_progress transaction entries should be skipped.
 * Bootstrap usage for BKJM : "-force", "-nonInteractive", "-skipSharedEditsCheck"
 */
@Test public void testBootstrapStandbyWithActiveNN() throws Exception {
cluster.transitionToActive(0);
Configuration confNN1=cluster.getConfiguration(1);
DistributedFileSystem dfs=(DistributedFileSystem)HATestUtil.configureFailoverFs(cluster,confNN1);
// Generate some edits on the active NN.
for (int i=1; i <= 10; i++) {
dfs.mkdirs(new Path("/test" + i));
}
dfs.close();
// Remove NN1's local edit log so bootstrap has to fetch everything.
cluster.shutdownNameNode(1);
deleteEditLogIfExists(confNN1);
// Checkpoint the active NN so a recent fsimage exists for bootstrap.
cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER,true);
cluster.getNameNodeRpc(0).saveNamespace();
cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE,true);
// Without -skipSharedEditsCheck bootstrap returns 6 (presumably the
// shared-edits check failing — confirm against BootstrapStandby's codes);
// with the flag it succeeds (0).
int rc=BootstrapStandby.run(new String[]{"-force","-nonInteractive"},confNN1);
Assert.assertEquals("Mismatches return code",6,rc);
rc=BootstrapStandby.run(new String[]{"-force","-nonInteractive","-skipSharedEditsCheck"},confNN1);
Assert.assertEquals("Mismatches return code",0,rc);
// Restart NN1 as standby and verify it checkpoints in sync with NN0.
confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,1);
cluster.restartNameNode(1);
cluster.transitionToStandby(1);
NameNode nn0=cluster.getNameNode(0);
HATestUtil.waitForStandbyToCatchUp(nn0,cluster.getNameNode(1));
long expectedCheckpointTxId=NameNodeAdapter.getNamesystem(nn0).getFSImage().getMostRecentCheckpointTxId();
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of((int)expectedCheckpointTxId));
FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of((int)expectedCheckpointTxId));
FSImageTestUtil.assertNNFilesMatch(cluster);
}
InternalCallVerifier EqualityVerifier
/**
 * read() must return exactly the value stored by the preceding update().
 */
@Test public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
  String data = "inprogressNode";
  CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  ci.init();
  ci.update(data);
  String inprogressNodePath = ci.read();
  // Compare against the variable actually written (the original
  // duplicated the literal, which could drift out of sync).
  assertEquals("Not returning inprogressZnode", data, inprogressNodePath);
}
InternalCallVerifier EqualityVerifier
/**
 * After clear(), read() must report no inprogress node (null).
 */
@Test public void testReadShouldReturnNullAfterClear() throws Exception {
  CurrentInprogress current = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  current.init();
  // Store a value, confirm it is readable, then clear it.
  current.update("myInprogressZnode");
  current.read();
  current.clear();
  String pathAfterClear = current.read();
  assertEquals("Expecting null to be return", null, pathAfterClear);
}
InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier
/**
 * Tests that update should throw IOE, if version number modifies between read
 * and update.
 */
@Test(expected=IOException.class) public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead() throws Exception {
CurrentInprogress ci=new CurrentInprogress(zkc,CURRENT_NODE_PATH);
ci.init();
ci.update("myInprogressZnode");
// read() presumably records the znode version the next update checks
// against — confirm against CurrentInprogress.
assertEquals("Not returning myInprogressZnode","myInprogressZnode",ci.read());
// This update bumps the znode version past the one observed by read().
ci.update("YourInprogressZnode");
// Expected to throw IOException: the version observed earlier is stale.
ci.update("myInprogressZnode");
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercise a mix of plain read, seek, skip, positioned read and ByteBuffer
 * read on one stream, tracking the expected position after each operation.
 * Fixes the swapped (actual, expected) argument order in the assertions —
 * JUnit's assertArrayEquals/assertEquals take (expected, actual).
 */
@Test(timeout=120000) public void testCombinedOp() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
  final int len1 = dataLen / 8;
  final int len2 = dataLen / 10;
  InputStream in = getInputStream(defaultBufferSize);
  // Plain read of the first len1 bytes.
  byte[] readData = new byte[len1];
  readAll(in, readData, 0, len1);
  byte[] expectedData = new byte[len1];
  System.arraycopy(data, 0, expectedData, 0, len1);
  Assert.assertArrayEquals(expectedData, readData);
  long pos = ((Seekable) in).getPos();
  Assert.assertEquals(len1, pos);
  // Seek forward len2 bytes, then skip len2 more.
  ((Seekable) in).seek(pos + len2);
  long n = in.skip(len2);
  Assert.assertEquals(len2, n);
  // Positioned read must not move the stream position.
  positionedReadCheck(in, dataLen / 4);
  pos = ((Seekable) in).getPos();
  Assert.assertEquals(len1 + len2 + len2, pos);
  // ByteBuffer read from the current position.
  ByteBuffer buf = ByteBuffer.allocate(len1);
  int nRead = ((ByteBufferReadable) in).read(buf);
  readData = new byte[nRead];
  buf.rewind();
  buf.get(readData);
  expectedData = new byte[nRead];
  System.arraycopy(data, (int) pos, expectedData, 0, nRead);
  Assert.assertArrayEquals(expectedData, readData);
  pos = ((Seekable) in).getPos();
  Assert.assertEquals(len1 + 2 * len2 + nRead, pos);
  // Another positioned read, then a plain read.
  positionedReadCheck(in, dataLen / 3);
  readData = new byte[len1];
  readAll(in, readData, 0, len1);
  expectedData = new byte[len1];
  System.arraycopy(data, (int) pos, expectedData, 0, len1);
  Assert.assertArrayEquals(expectedData, readData);
  pos = ((Seekable) in).getPos();
  Assert.assertEquals(2 * len1 + 2 * len2 + nRead, pos);
  // Second ByteBuffer read.
  buf = ByteBuffer.allocate(len1);
  nRead = ((ByteBufferReadable) in).read(buf);
  readData = new byte[nRead];
  buf.rewind();
  buf.get(readData);
  expectedData = new byte[nRead];
  System.arraycopy(data, (int) pos, expectedData, 0, nRead);
  Assert.assertArrayEquals(expectedData, readData);
  // Reading at EOF must return -1.
  ((Seekable) in).seek(dataLen);
  buf.clear();
  n = ((ByteBufferReadable) in).read(buf);
  Assert.assertEquals(-1, n);
  in.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify enhanced (possibly zero-copy) ByteBuffer reads interleaved with
 * plain reads, checking each returned region against the source data.
 * Fixes the swapped (actual, expected) argument order in assertArrayEquals —
 * JUnit's signature is (expected, actual).
 */
@Test(timeout=120000) public void testHasEnhancedByteBufferAccess() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in = getInputStream(defaultBufferSize);
  final int len1 = dataLen / 8;
  // Enhanced read of up to len1 bytes from the start.
  ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read(getBufferPool(), len1,
      EnumSet.of(ReadOption.SKIP_CHECKSUMS));
  int n1 = buffer.remaining();
  byte[] readData = new byte[n1];
  buffer.get(readData);
  byte[] expectedData = new byte[n1];
  System.arraycopy(data, 0, expectedData, 0, n1);
  Assert.assertArrayEquals(expectedData, readData);
  ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
  // Plain read of the next len1 bytes.
  readData = new byte[len1];
  readAll(in, readData, 0, len1);
  expectedData = new byte[len1];
  System.arraycopy(data, n1, expectedData, 0, len1);
  Assert.assertArrayEquals(expectedData, readData);
  // Second enhanced read continues from the current position.
  buffer = ((HasEnhancedByteBufferAccess) in).read(getBufferPool(), len1,
      EnumSet.of(ReadOption.SKIP_CHECKSUMS));
  int n2 = buffer.remaining();
  readData = new byte[n2];
  buffer.get(readData);
  expectedData = new byte[n2];
  System.arraycopy(data, n1 + len1, expectedData, 0, n2);
  Assert.assertArrayEquals(expectedData, readData);
  ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
  in.close();
}
InternalCallVerifier EqualityVerifier
/**
 * Rolling a new key version must invalidate the cached current key, so the
 * next lookup goes back to the underlying provider.
 */
@Test public void testRollNewVersion() throws Exception {
  KeyProvider.KeyVersion keyVersion = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider backing = Mockito.mock(KeyProvider.class);
  Mockito.when(backing.getCurrentKey(Mockito.eq("k1"))).thenReturn(keyVersion);
  KeyProvider caching = new CachingKeyProvider(backing, 100, 100);
  // First lookup populates the cache.
  Assert.assertEquals(keyVersion, caching.getCurrentKey("k1"));
  Mockito.verify(backing, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
  // Roll (no material): next lookup must reload from the provider.
  caching.rollNewVersion("k1");
  Assert.assertEquals(keyVersion, caching.getCurrentKey("k1"));
  Mockito.verify(backing, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
  // Roll with explicit material: same eviction behaviour.
  caching.rollNewVersion("k1", new byte[0]);
  Assert.assertEquals(keyVersion, caching.getCurrentKey("k1"));
  Mockito.verify(backing, Mockito.times(3)).getCurrentKey(Mockito.eq("k1"));
}
InternalCallVerifier EqualityVerifier
/**
 * Metadata caching: repeated lookups are served from the cache until the
 * entry expires; null results are not cached at all.
 */
@Test public void testMetadata() throws Exception {
  KeyProvider.Metadata mockMeta = Mockito.mock(KeyProvider.Metadata.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
  Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
  // First call hits the backing provider; the second is served cached.
  Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
  Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
  // Sleep past the cache timeout (presumably the 100ms constructor arg)
  // so the entry expires and the provider is consulted again.
  Thread.sleep(200);
  Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
  Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k1"));
  // Null results are not cached: each lookup reaches the provider.
  // assertNull is clearer than assertEquals(null, ...), which relies on
  // overload resolution.
  cache = new CachingKeyProvider(mockProv, 100, 100);
  Assert.assertNull(cache.getMetadata("k2"));
  Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k2"));
  Assert.assertNull(cache.getMetadata("k2"));
  Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k2"));
}
InternalCallVerifier EqualityVerifier
/**
 * Deleting a key must evict both the current-key and key-version cache
 * entries, forcing the next lookups back to the underlying provider.
 */
@Test public void testDeleteKey() throws Exception {
  KeyProvider.KeyVersion keyVersion = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider backing = Mockito.mock(KeyProvider.class);
  Mockito.when(backing.getCurrentKey(Mockito.eq("k1"))).thenReturn(keyVersion);
  Mockito.when(backing.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(keyVersion);
  Mockito.when(backing.getMetadata(Mockito.eq("k1"))).thenReturn(
      new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
  KeyProvider caching = new CachingKeyProvider(backing, 100, 100);
  // Populate both the current-key and key-version caches.
  Assert.assertEquals(keyVersion, caching.getCurrentKey("k1"));
  Mockito.verify(backing, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
  Assert.assertEquals(keyVersion, caching.getKeyVersion("k1@0"));
  Mockito.verify(backing, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
  // Deleting the key must invalidate both cached entries.
  caching.deleteKey("k1");
  Assert.assertEquals(keyVersion, caching.getCurrentKey("k1"));
  Mockito.verify(backing, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
  Assert.assertEquals(keyVersion, caching.getKeyVersion("k1@0"));
  Mockito.verify(backing, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
}
InternalCallVerifier EqualityVerifier
/**
 * Current-key caching: repeated lookups are served from the cache until the
 * entry expires; null results are not cached at all.
 */
@Test public void testCurrentKey() throws Exception {
  KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
  Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
  // First call hits the backing provider; the second is served cached.
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
  // Sleep well past the cache timeout so the entry expires.
  // NOTE(review): sibling tests sleep 200ms against the same 100/100
  // cache settings; 1200ms looks longer than necessary — confirm before
  // shortening.
  Thread.sleep(1200);
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
  // Null results are not cached; assertNull is clearer than
  // assertEquals(null, ...).
  cache = new CachingKeyProvider(mockProv, 100, 100);
  Assert.assertNull(cache.getCurrentKey("k2"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k2"));
  Assert.assertNull(cache.getCurrentKey("k2"));
  Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k2"));
}
InternalCallVerifier EqualityVerifier
/**
 * Key-version caching: repeated lookups are served from the cache until the
 * entry expires; null results are not cached at all.
 */
@Test public void testKeyVersion() throws Exception {
  KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
  Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
  // First call hits the backing provider; the second is served cached.
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
  // Sleep past the cache timeout so the entry expires.
  Thread.sleep(200);
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
  // Null results are not cached; assertNull is clearer than
  // assertEquals(null, ...).
  cache = new CachingKeyProvider(mockProv, 100, 100);
  Assert.assertNull(cache.getKeyVersion("k2@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k2@0"));
  Assert.assertNull(cache.getKeyVersion("k2@0"));
  Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k2@0"));
}
InternalCallVerifier EqualityVerifier
/**
 * A KeyVersion must expose the version name and material it was built with.
 */
@Test public void testKeyMaterial() throws Exception {
  final byte[] material = {1, 2, 3, 4};
  KeyProvider.KeyVersion version = new KeyProvider.KeyVersion("key1", "key1@1", material);
  assertEquals("key1@1", version.getVersionName());
  assertArrayEquals(new byte[]{1, 2, 3, 4}, version.getMaterial());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * KeyProvider.Metadata: constructor fields, serialize/deserialize
 * round-trip, and addVersion semantics — first without, then with a
 * description and attributes.
 */
@Test public void testMetadata() throws Exception {
  // "M" is month-of-year; the previous pattern "y/m/d" used "m"
  // (minute-of-hour), silently parsing "12" as minutes and defaulting
  // the month.
  DateFormat format = new SimpleDateFormat("y/M/d");
  Date date = format.parse("2013/12/25");
  KeyProvider.Metadata meta = new KeyProvider.Metadata("myCipher", 100, null, null, date, 123);
  assertEquals("myCipher", meta.getCipher());
  assertEquals(100, meta.getBitLength());
  assertNull(meta.getDescription());
  assertEquals(date, meta.getCreated());
  assertEquals(123, meta.getVersions());
  // Round-trip through serialize(): every field must survive; absent
  // attributes deserialize as an empty map.
  KeyProvider.Metadata second = new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(), second.getCipher());
  assertEquals(meta.getBitLength(), second.getBitLength());
  assertNull(second.getDescription());
  assertTrue(second.getAttributes().isEmpty());
  assertEquals(meta.getCreated(), second.getCreated());
  assertEquals(meta.getVersions(), second.getVersions());
  // addVersion returns the pre-increment count and only affects the copy.
  int newVersion = second.addVersion();
  assertEquals(123, newVersion);
  assertEquals(124, second.getVersions());
  assertEquals(123, meta.getVersions());
  // Repeat with description and attributes populated (typed Map instead
  // of the raw types used previously).
  format = new SimpleDateFormat("y/M/d");
  date = format.parse("2013/12/25");
  Map<String, String> attributes = new HashMap<>();
  attributes.put("a", "A");
  meta = new KeyProvider.Metadata("myCipher", 100, "description", attributes, date, 123);
  assertEquals("myCipher", meta.getCipher());
  assertEquals(100, meta.getBitLength());
  assertEquals("description", meta.getDescription());
  assertEquals(attributes, meta.getAttributes());
  assertEquals(date, meta.getCreated());
  assertEquals(123, meta.getVersions());
  second = new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(), second.getCipher());
  assertEquals(meta.getBitLength(), second.getBitLength());
  assertEquals(meta.getDescription(), second.getDescription());
  assertEquals(meta.getAttributes(), second.getAttributes());
  assertEquals(meta.getCreated(), second.getCreated());
  assertEquals(meta.getVersions(), second.getVersions());
  newVersion = second.addVersion();
  assertEquals(123, newVersion);
  assertEquals(124, second.getVersions());
  assertEquals(123, meta.getVersions());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * createKey and rollNewVersion without explicit material must generate key
 * material using the configured cipher and bit length.
 */
@Test public void testMaterialGeneration() throws Exception {
  MyKeyProvider provider = new MyKeyProvider();
  KeyProvider.Options options = new KeyProvider.Options(new Configuration());
  options.setCipher(CIPHER);
  options.setBitLength(128);
  // createKey with no material supplied must generate it from the options.
  provider.createKey("hello", options);
  Assert.assertEquals(128, provider.size);
  Assert.assertEquals(CIPHER, provider.algorithm);
  Assert.assertNotNull(provider.material);
  // rollNewVersion likewise generates fresh material.
  provider = new MyKeyProvider();
  provider.rollNewVersion("hello");
  Assert.assertEquals(128, provider.size);
  Assert.assertEquals(CIPHER, provider.algorithm);
  Assert.assertNotNull(provider.material);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * KeyProvider.Options: defaults come from the Configuration, setters
 * override them, and a fresh Configuration yields the built-in defaults.
 */
@Test public void testOptions() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProvider.DEFAULT_CIPHER_NAME, "myCipher");
  conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 512);
  // Parameterized Map/HashMap instead of the raw types used previously.
  Map<String, String> attributes = new HashMap<>();
  attributes.put("a", "A");
  KeyProvider.Options options = KeyProvider.options(conf);
  // Defaults are taken from the configuration.
  assertEquals("myCipher", options.getCipher());
  assertEquals(512, options.getBitLength());
  // Setters override the configured defaults.
  options.setCipher("yourCipher");
  options.setDescription("description");
  options.setAttributes(attributes);
  options.setBitLength(128);
  assertEquals("yourCipher", options.getCipher());
  assertEquals(128, options.getBitLength());
  assertEquals("description", options.getDescription());
  assertEquals(attributes, options.getAttributes());
  // A fresh Configuration falls back to the library defaults.
  options = KeyProvider.options(new Configuration());
  assertEquals(KeyProvider.DEFAULT_CIPHER, options.getCipher());
  assertEquals(KeyProvider.DEFAULT_BITLENGTH, options.getBitLength());
}
BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Generates EEKs and verifies version naming, material lengths, that
 * decrypting the same EEK twice is deterministic, and that two generated
 * EEKs differ in both material and IV.
 */
@Test public void testGenerateEncryptedKey() throws Exception {
  KeyProviderCryptoExtension.EncryptedKeyVersion ek1 =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  assertEquals("Version name of EEK should be EEK",
      KeyProviderCryptoExtension.EEK,
      ek1.getEncryptedKeyVersion().getVersionName());
  assertEquals("Name of EEK should be encryption key name",
      ENCRYPTION_KEY_NAME, ek1.getEncryptionKeyName());
  assertNotNull("Expected encrypted key material",
      ek1.getEncryptedKeyVersion().getMaterial());
  assertEquals("Length of encryption key material and EEK material should "
      + "be the same", encryptionKey.getMaterial().length,
      ek1.getEncryptedKeyVersion().getMaterial().length);
  KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
  assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
  assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
  // k1 is the *decrypted* key; the previous message wrongly said "Encrypted".
  if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
    fail("Decrypted key material should not equal encryption key material");
  }
  // This compares the *encrypted* material against the encryption key,
  // not against the decrypted key; message fixed to match the operands.
  if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
      encryptionKey.getMaterial())) {
    fail("Encrypted key material should not equal encryption key material");
  }
  // Decrypting the same EEK twice must yield identical material.
  KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
  assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
  KeyProviderCryptoExtension.EncryptedKeyVersion ek2 =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
  if (Arrays.equals(k1.getMaterial(), k2.getMaterial())) {
    fail("Generated EEKs should have different material!");
  }
  if (Arrays.equals(ek1.getEncryptedKeyIv(), ek2.getEncryptedKeyIv())) {
    fail("Generated EEKs should have different IVs!");
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Decrypts an EEK two ways -- manually with the JCE Cipher API and via
 * decryptEncryptedKey -- and checks both produce identical key material.
 */
@Test public void testEncryptDecrypt() throws Exception {
  // Generate an EEK and capture its IV and encrypted material.
  final KeyProviderCryptoExtension.EncryptedKeyVersion eek =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  final byte[] iv = eek.getEncryptedKeyIv();
  final byte[] encMaterial = eek.getEncryptedKeyVersion().getMaterial();
  // Manual decryption with AES/CTR using the derived IV.
  final Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
  cipher.init(Cipher.DECRYPT_MODE,
      new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
      new IvParameterSpec(
          KeyProviderCryptoExtension.EncryptedKeyVersion.deriveIV(iv)));
  final byte[] manualMaterial = cipher.doFinal(encMaterial);
  // Decryption through the extension API, from a rebuilt EEK.
  final EncryptedKeyVersion rebuilt = EncryptedKeyVersion.createForDecryption(
      eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
      eek.getEncryptedKeyVersion().getMaterial());
  final KeyVersion decryptedKey = kpExt.decryptEncryptedKey(rebuilt);
  final byte[] apiMaterial = decryptedKey.getMaterial();
  assertArrayEquals("Wrong key material from decryptEncryptedKey",
      manualMaterial, apiMaterial);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testCreateExtension() throws Exception {
Configuration conf=new Configuration();
Credentials credentials=new Credentials();
KeyProvider kp=new UserProvider.Factory().createProvider(new URI("user:///"),conf);
KeyProviderDelegationTokenExtension kpDTE1=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
Assert.assertNotNull(kpDTE1);
Assert.assertNull(kpDTE1.addDelegationTokens("user",credentials));
MockKeyProvider mock=mock(MockKeyProvider.class);
when(mock.addDelegationTokens("renewer",credentials)).thenReturn(new Token>[]{new Token(null,null,new Text("kind"),new Text("service"))});
KeyProviderDelegationTokenExtension kpDTE2=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(mock);
Token>[] tokens=kpDTE2.addDelegationTokens("renewer",credentials);
Assert.assertNotNull(tokens);
Assert.assertEquals("kind",tokens[0].getKind().toString());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercises the Java keystore provider: creation with owner-only
 * permissions, and recovery from the _OLD/_NEW backup files the provider
 * leaves behind. The rename/delete sequence below is order-sensitive.
 */
@Test public void testJksProvider() throws Exception {
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
File file=new File(tmpDir,"test.jks");
file.delete();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl);
checkSpecificProvider(conf,ourUrl);
Path path=ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs=path.getFileSystem(conf);
FileStatus s=fs.getFileStatus(path);
// The keystore must be created owner-only (rwx------).
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist",file.isFile());
// Valid _OLD copy plus an empty current file: the provider should
// recover and remove _OLD.
File oldFile=new File(file.getPath() + "_OLD");
file.renameTo(oldFile);
file.delete();
file.createNewFile();
assertTrue(oldFile.exists());
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
assertTrue(file.exists());
assertTrue(oldFile + "should be deleted",!oldFile.exists());
verifyAfterReload(file,provider);
assertTrue(!oldFile.exists());
// A _NEW file next to the current file is inconsistent and must make
// provider construction fail.
File newFile=new File(file.getPath() + "_NEW");
newFile.createNewFile();
try {
provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("_NEW and current file should not exist together !!");
}
catch ( Exception e) {
// expected: inconsistent keystore state
}
finally {
if (newFile.exists()) {
newFile.delete();
}
}
// Only a valid _NEW file present: the provider should load from it.
file.renameTo(newFile);
file.delete();
try {
provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
}
catch ( Exception e) {
Assert.fail("JKS should load from _NEW file !!");
}
verifyAfterReload(file,provider);
// Empty _NEW plus a valid _OLD file: the provider should fall back to _OLD.
newFile.createNewFile();
file.renameTo(oldFile);
file.delete();
try {
provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
}
catch ( Exception e) {
Assert.fail("JKS should load from _OLD file !!");
}
finally {
if (newFile.exists()) {
newFile.delete();
}
}
verifyAfterReload(file,provider);
// Widen permissions, then verify they are retained across provider use.
fs.setPermission(path,new FsPermission("777"));
checkPermissionRetention(conf,ourUrl,path);
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises the transient UserProvider and verifies the created key
 * versions land as secret keys in the current user's Credentials.
 */
@Test public void testUserProvider() throws Exception {
  final Configuration conf = new Configuration();
  final String providerUrl = UserProvider.SCHEME_NAME + ":///";
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, providerUrl);
  checkSpecificProvider(conf, providerUrl);
  // The transient provider stores material in the UGI credentials.
  final Credentials creds =
      UserGroupInformation.getCurrentUser().getCredentials();
  assertArrayEquals(new byte[]{1}, creds.getSecretKey(new Text("key4@0")));
  assertArrayEquals(new byte[]{2}, creds.getSecretKey(new Text("key4@1")));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A comma-separated provider path must yield both providers, in order,
 * each rendering back to its configured URI.
 */
@Test public void testFactory() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
      UserProvider.SCHEME_NAME + ":///," + JavaKeyStoreProvider.SCHEME_NAME
          + "://file" + tmpDir + "/test.jks");
  // Parameterized list instead of the raw type; elements are KeyProviders.
  List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
  assertEquals(2, providers.size());
  assertEquals(UserProvider.class, providers.get(0).getClass());
  assertEquals(JavaKeyStoreProvider.class, providers.get(1).getClass());
  assertEquals(UserProvider.SCHEME_NAME + ":///", providers.get(0).toString());
  assertEquals(JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir
      + "/test.jks", providers.get(1).toString());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Verifies keystore password handling via the password-file config key:
 * creation succeeds with the bundled password file, while a missing file,
 * a different file, or no configuration at all must fail with IOException.
 */
@Test public void testJksProviderPasswordViaConfig() throws Exception {
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
File file=new File(tmpDir,"test.jks");
file.delete();
try {
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,ourUrl);
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"javakeystoreprovider.password");
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
provider.createKey("key3",new byte[16],KeyProvider.options(conf));
provider.flush();
}
catch ( Exception ex) {
Assert.fail("could not create keystore with password file");
}
// Re-open with the same password file and confirm the key is readable.
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
Assert.assertNotNull(provider.getCurrentKey("key3"));
try {
// Non-existent password file must fail.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"bar");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using non existing password file, it should fail");
}
catch ( IOException ex) {
// expected
}
try {
// A different (wrong-password) file must fail.
conf.set(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY,"core-site.xml");
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("using different password file, it should fail");
}
catch ( IOException ex) {
// expected
}
try {
// No password-file property (and no env override) must fail too.
conf.unset(JavaKeyStoreProvider.KEYSTORE_PASSWORD_FILE_KEY);
KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("No password file property, env not set, it should fail");
}
catch ( IOException ex) {
// expected
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A provider URI with an unknown scheme must make "create" exit with 1
 * and report that no valid KeyProviders are configured.
 */
@Test public void testInvalidProvider() throws Exception {
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final String[] createArgs = {"create", "key1", "-cipher", "AES",
      "-provider", "sdff://file/tmp/keystore.jceks"};
  final int exitCode = shell.run(createArgs);
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("There are no valid " + "KeyProviders configured."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Requesting a 56-bit key must make "create" exit with 1 and report the
 * key was not created.
 */
@Test public void testInvalidKeySize() throws Exception {
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final int exitCode = shell.run(
      new String[]{"create", "key1", "-size", "56", "-provider", jceksProvider});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the only configured provider is the transient user:/// provider,
 * "create" must fail and report no valid KeyProviders.
 */
@Test public void testTransientProviderOnlyConfig() throws Exception {
  final KeyShell shell = new KeyShell();
  final Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
  shell.setConf(conf);
  final int exitCode = shell.run(new String[]{"create", "key1"});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("There are no valid " + "KeyProviders configured."));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Walks a key through its full lifecycle -- create, list, roll, delete --
 * checking shell output and listings at each step.
 */
@Test public void testKeySuccessfulKeyLifecycle() throws Exception {
  final String keyName = "key1";
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  outContent.reset();
  // Create.
  int exitCode = shell.run(
      new String[]{"create", keyName, "-provider", jceksProvider});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(keyName + " has been " + "successfully created"));
  // The key shows up in both terse and verbose listings.
  String listing = listKeys(shell, false);
  assertTrue(listing.contains(keyName));
  listing = listKeys(shell, true);
  assertTrue(listing.contains(keyName));
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("created"));
  // Roll to a new version.
  outContent.reset();
  exitCode = shell.run(
      new String[]{"roll", keyName, "-provider", jceksProvider});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("key1 has been successfully " + "rolled."));
  // Delete, then confirm it is gone from the listing.
  deleteKey(shell, keyName);
  listing = listKeys(shell, false);
  assertFalse(listing, listing.contains(keyName));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises "-attr" handling on key creation: well-formed attributes are
 * stored and listed, malformed or duplicate ones are rejected.
 */
@Test public void testAttributes() throws Exception {
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  // A single well-formed attribute is recorded and listed.
  int exitCode = shell.run(new String[]{"create", "keyattr1", "-provider",
      jceksProvider, "-attr", "foo=bar"});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("keyattr1 has been " + "successfully created"));
  String listing = listKeys(shell, true);
  assertTrue(listing.contains("keyattr1"));
  assertTrue(listing.contains("attributes: [foo=bar]"));
  // Malformed attributes (empty name, missing '=', bare '=') are rejected.
  for (String badAttr : new String[]{"=bar", "foo", "="}) {
    outContent.reset();
    exitCode = shell.run(new String[]{"create", "keyattr2", "-provider",
        jceksProvider, "-attr", badAttr});
    assertEquals(1, exitCode);
  }
  // An '=' inside the value is accepted (a=b=c) and listed verbatim.
  outContent.reset();
  exitCode = shell.run(new String[]{"create", "keyattr2", "-provider",
      jceksProvider, "-attr", "a=b=c"});
  assertEquals(0, exitCode);
  listing = listKeys(shell, true);
  assertTrue(listing.contains("keyattr2"));
  assertTrue(listing.contains("attributes: [a=b=c]"));
  // Whitespace around names/values is trimmed; several -attr flags combine.
  outContent.reset();
  exitCode = shell.run(new String[]{"create", "keyattr3", "-provider",
      jceksProvider, "-attr", "foo = bar", "-attr", " glarch =baz ",
      "-attr", "abc=def"});
  assertEquals(0, exitCode);
  listing = listKeys(shell, true);
  assertTrue(listing.contains("keyattr3"));
  assertTrue(listing.contains("[foo=bar]"));
  assertTrue(listing.contains("[glarch=baz]"));
  assertTrue(listing.contains("[abc=def]"));
  // Duplicate attribute names are rejected.
  outContent.reset();
  exitCode = shell.run(new String[]{"create", "keyattr4", "-provider",
      jceksProvider, "-attr", "foo=bar", "-attr", "foo=glarch"});
  assertEquals(1, exitCode);
  // Clean up the keys that were actually created.
  deleteKey(shell, "keyattr1");
  deleteKey(shell, "keyattr2");
  deleteKey(shell, "keyattr3");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unknown cipher name ("LJM") must make "create" exit with 1 and
 * report the key was not created.
 */
@Test public void testInvalidCipher() throws Exception {
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final int exitCode = shell.run(new String[]{"create", "key1", "-cipher",
      "LJM", "-provider", jceksProvider});
  assertEquals(1, exitCode);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A "-description" given at creation time must appear in the verbose
 * key listing.
 */
@Test public void testKeySuccessfulCreationWithDescription() throws Exception {
  outContent.reset();
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final int exitCode = shell.run(new String[]{"create", "key1", "-provider",
      jceksProvider, "-description", "someDescription"});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("key1 has been successfully " + "created"));
  final String listing = listKeys(shell, true);
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("someDescription"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creating a key in the transient user:/// provider succeeds but must
 * print a warning about modifying a transient provider.
 */
@Test public void testTransientProviderWarning() throws Exception {
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final int exitCode = shell.run(new String[]{"create", "key1", "-cipher",
      "AES", "-provider", "user:///"});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A fully-qualified cipher spec (algorithm/mode/padding) must be accepted
 * on key creation.
 */
@Test public void testFullCipher() throws Exception {
  final String keyName = "key1";
  final KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  final int exitCode = shell.run(new String[]{"create", keyName, "-cipher",
      "AES/CBC/pkcs5Padding", "-provider", jceksProvider});
  assertEquals(0, exitCode);
  assertTrue(outContent.toString().contains(keyName + " has been " + "successfully created"));
  deleteKey(shell, keyName);
}
InternalCallVerifier EqualityVerifier
/**
 * getAtMost with SyncGenerationPolicy.ALL: every requested value that is
 * missing from the queue is generated synchronously, even past the queue
 * capacity (10 and then 19 values here).
 */
@Test public void testgetAtMostPolicyALL() throws Exception {
  final MockFiller mockFiller = new MockFiller();
  final ValueQueue queue =
      new ValueQueue(10, 0.1f, 300, 1, SyncGenerationPolicy.ALL, mockFiller);
  Assert.assertEquals("test", queue.getNext("k1"));
  Assert.assertEquals(1, mockFiller.getTop().num);
  // ALL policy: asking for 10 generates all 10.
  Assert.assertEquals(10, queue.getAtMost("k1", 10).size());
  Assert.assertEquals(10, mockFiller.getTop().num);
  // Even a request beyond the queue size (19) is fully satisfied.
  Assert.assertEquals(19, queue.getAtMost("k1", 19).size());
  Assert.assertEquals(19, mockFiller.getTop().num);
  queue.shutdown();
}
InternalCallVerifier EqualityVerifier
/**
 * initializeQueuesForKeys must pre-fill (warm up) the queue for each named
 * key -- 5 values per key here (10 * lowWatermark 0.5).
 */
@Test public void testWarmUp() throws Exception {
  final MockFiller mockFiller = new MockFiller();
  final ValueQueue queue =
      new ValueQueue(10, 0.5f, 300, 1, SyncGenerationPolicy.ALL, mockFiller);
  queue.initializeQueuesForKeys("k1", "k2", "k3");
  final FillInfo[] fills =
      {mockFiller.getTop(), mockFiller.getTop(), mockFiller.getTop()};
  Assert.assertEquals(5, fills[0].num);
  Assert.assertEquals(5, fills[1].num);
  Assert.assertEquals(5, fills[2].num);
  // All three keys were filled (order unspecified).
  Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
      Sets.newHashSet(fills[0].key, fills[1].key, fills[2].key));
  queue.shutdown();
}
InternalCallVerifier EqualityVerifier
/**
 * Verify getAtMost when SyncGeneration Policy = ATLEAST_ONE
 * (the old comment wrongly said ALL).
 */
@Test public void testgetAtMostPolicyATLEAST_ONE() throws Exception {
MockFiller filler=new MockFiller();
ValueQueue vq=new ValueQueue(10,0.3f,300,1,SyncGenerationPolicy.ATLEAST_ONE,filler);
Assert.assertEquals("test",vq.getNext("k1"));
Assert.assertEquals(3,filler.getTop().num);
// ATLEAST_ONE: a request for 10 may return fewer (2 here).
Assert.assertEquals(2,vq.getAtMost("k1",10).size());
Assert.assertEquals(10,filler.getTop().num);
vq.shutdown();
}
InternalCallVerifier EqualityVerifier
/**
 * Draining the queue below the low watermark must trigger the background
 * refill task, which tops the queue back up to its full size (10).
 */
@Test public void testRefill() throws Exception {
  final MockFiller mockFiller = new MockFiller();
  final ValueQueue queue =
      new ValueQueue(10, 0.1f, 300, 1, SyncGenerationPolicy.ALL, mockFiller);
  Assert.assertEquals("test", queue.getNext("k1"));
  Assert.assertEquals(1, mockFiller.getTop().num);
  // Consume again to drop below the watermark and provoke a refill.
  queue.getNext("k1");
  Assert.assertEquals(1, mockFiller.getTop().num);
  // The async refill requests the full 10 values.
  Assert.assertEquals(10, mockFiller.getTop().num);
  queue.shutdown();
}
InternalCallVerifier EqualityVerifier
/**
 * getAtMost with SyncGenerationPolicy.LOW_WATERMARK: at most the
 * low-watermark number of values (3 here) is generated synchronously.
 */
@Test public void testgetAtMostPolicyLOW_WATERMARK() throws Exception {
  final MockFiller mockFiller = new MockFiller();
  final ValueQueue queue = new ValueQueue(10, 0.3f, 300, 1,
      SyncGenerationPolicy.LOW_WATERMARK, mockFiller);
  Assert.assertEquals("test", queue.getNext("k1"));
  Assert.assertEquals(3, mockFiller.getTop().num);
  // A request for 10 yields only up to the watermark (3) synchronously.
  Assert.assertEquals(3, queue.getAtMost("k1", 10).size());
  Assert.assertEquals(1, mockFiller.getTop().num);
  Assert.assertEquals(10, mockFiller.getTop().num);
  queue.shutdown();
}
InternalCallVerifier EqualityVerifier
/**
 * With the queue still at the low watermark, no background refill should
 * be scheduled after the check interval (no further fill request queued).
 */
@Test public void testNoRefill() throws Exception {
  final MockFiller mockFiller = new MockFiller();
  final ValueQueue queue =
      new ValueQueue(10, 0.5f, 300, 1, SyncGenerationPolicy.ALL, mockFiller);
  Assert.assertEquals("test", queue.getNext("k1"));
  Assert.assertEquals(5, mockFiller.getTop().num);
  // No second fill request must have been issued.
  Assert.assertEquals(null, mockFiller.getTop());
  queue.shutdown();
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * End-to-end KMS ACL test: each ACL type is granted only to a user named
 * after that type; an unauthorized "client" is rejected on every
 * operation, each named user succeeds on exactly its own operation, and
 * finally the CREATE ACL is rewritten on disk and hot-reloaded to verify
 * the previously-allowed user is now denied.
 */
@Test public void testACLs() throws Exception {
Configuration conf=new Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir=getTestDir();
conf=createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type","kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal","HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules","DEFAULT");
// Grant each ACL only to a user named after the ACL type itself.
for ( KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getConfigKey(),type.toString());
}
// CREATE and ROLLOVER additionally admit SET_KEY_MATERIAL.
conf.set(KMSACLs.Type.CREATE.getConfigKey(),KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getConfigKey(),KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
writeConf(testDir,conf);
runServer(null,null,testDir,new KMSCallable(){
@Override public Void call() throws Exception {
final Configuration conf=new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,128);
final URI uri=createKMSUri(getKMSUrl());
// "client" holds no ACL: every operation must raise
// AuthorizationException (any other failure is a test error).
doAs("client",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
kp.createKey("k",new KeyProvider.Options(conf));
Assert.fail();
}
catch ( AuthorizationException ex) {
// expected
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.createKey("k",new byte[16],new KeyProvider.Options(conf));
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.rollNewVersion("k");
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.rollNewVersion("k",new byte[16]);
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeys();
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeysMetadata("k");
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeyVersion("k@0");
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getCurrentKey("k");
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getMetadata("k");
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeyVersions("k");
Assert.fail();
}
catch ( AuthorizationException ex) {
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
// Each named user may perform exactly its own operation; the KMS
// never returns key material to clients (assertNull checks below).
doAs("CREATE",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
KeyProvider.KeyVersion kv=kp.createKey("k0",new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
doAs("DELETE",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
kp.deleteKey("k0");
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
doAs("SET_KEY_MATERIAL",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
KeyProvider.KeyVersion kv=kp.createKey("k1",new byte[16],new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
doAs("ROLLOVER",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
KeyProvider.KeyVersion kv=kp.rollNewVersion("k1");
Assert.assertNull(kv.getMaterial());
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
doAs("SET_KEY_MATERIAL",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
KeyProvider.KeyVersion kv=kp.rollNewVersion("k1",new byte[16]);
Assert.assertNull(kv.getMaterial());
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
final KeyVersion currKv=doAs("GET",new PrivilegedExceptionAction(){
@Override public KeyVersion run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
kp.getKeyVersion("k1@0");
KeyVersion kv=kp.getCurrentKey("k1");
return kv;
}
catch ( Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
}
);
final EncryptedKeyVersion encKv=doAs("GENERATE_EEK",new PrivilegedExceptionAction(){
@Override public EncryptedKeyVersion run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
KeyProviderCryptoExtension kpCE=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1=kpCE.generateEncryptedKey(currKv.getName());
return ek1;
}
catch ( Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
}
);
doAs("DECRYPT_EEK",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
KeyProviderCryptoExtension kpCE=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
kpCE.decryptEncryptedKey(encKv);
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
doAs("GET_KEYS",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
kp.getKeys();
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
doAs("GET_METADATA",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
KeyProvider kp=new KMSClientProvider(uri,conf);
try {
kp.getMetadata("k1");
kp.getKeysMetadata("k1");
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
// Rewrite the CREATE ACL on disk and force a reload, then verify the
// formerly-authorized "CREATE" user is now denied.
KMSWebApp.getACLs().stopReloader();
Thread.sleep(10);
conf.set(KMSACLs.Type.CREATE.getConfigKey(),"foo");
writeConf(testDir,conf);
// NOTE(review): the sleep presumably lets the config file timestamp
// settle before the manual reload below -- confirm against KMSACLs.
Thread.sleep(1000);
KMSWebApp.getACLs().run();
doAs("CREATE",new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
KeyProvider kp=new KMSClientProvider(uri,conf);
KeyProvider.KeyVersion kv=kp.createKey("k2",new KeyProvider.Options(conf));
Assert.fail();
}
catch ( AuthorizationException ex) {
// expected after the ACL change
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
return null;
}
}
);
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Checks that an ACL entry naming "client" matches both the short name
 * ("client") and the service principal form ("client/host") when
 * authenticating against the KMS.
 */
@Test public void testServicePrincipalACLs() throws Exception {
  Configuration conf = new Configuration();
  conf.set("hadoop.security.authentication", "kerberos");
  UserGroupInformation.setConfiguration(conf);
  File testDir = getTestDir();
  conf = createBaseKMSConf(testDir);
  conf.set("hadoop.kms.authentication.type", "kerberos");
  conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
  conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
  conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
  // Lock down every ACL, then open CREATE for "client" only.
  for (KMSACLs.Type type : KMSACLs.Type.values()) {
    conf.set(type.getConfigKey(), " ");
  }
  conf.set(KMSACLs.Type.CREATE.getConfigKey(), "client");
  writeConf(testDir, conf);
  runServer(null, null, testDir, new KMSCallable(){
    @Override public Void call() throws Exception {
      final Configuration conf = new Configuration();
      // Removed a dead setInt(DEFAULT_BITLENGTH_NAME, 128) that was
      // immediately overwritten by the 64-bit value below.
      conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
      final URI uri = createKMSUri(getKMSUrl());
      doAs("client", new PrivilegedExceptionAction(){
        @Override public Void run() throws Exception {
          try {
            KeyProvider kp = new KMSClientProvider(uri, conf);
            KeyProvider.KeyVersion kv = kp.createKey("ck0", new KeyProvider.Options(conf));
            // The KMS must not hand key material back to the client.
            Assert.assertNull(kv.getMaterial());
          }
          catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      }
      );
      doAs("client/host", new PrivilegedExceptionAction(){
        @Override public Void run() throws Exception {
          try {
            KeyProvider kp = new KMSClientProvider(uri, conf);
            KeyProvider.KeyVersion kv = kp.createKey("ck1", new KeyProvider.Options(conf));
            Assert.assertNull(kv.getMaterial());
          }
          catch (Exception ex) {
            Assert.fail(ex.getMessage());
          }
          return null;
        }
      }
      );
      return null;
    }
  }
  );
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Proxy-user test: "client" is allowed to proxy only user "foo" from any
 * host. A key created directly by "client" works, a proxied call as "foo"
 * works, and a proxied call as "foo1" must be rejected.
 */
@Test public void testProxyUser() throws Exception {
Configuration conf=new Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir=getTestDir();
conf=createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type","kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal","HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules","DEFAULT");
// "client" may impersonate only "foo", from any host.
conf.set("hadoop.kms.proxyuser.client.users","foo");
conf.set("hadoop.kms.proxyuser.client.hosts","*");
writeConf(testDir,conf);
runServer(null,null,testDir,new KMSCallable(){
@Override public Void call() throws Exception {
final Configuration conf=new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME,64);
final URI uri=createKMSUri(getKMSUrl());
UserGroupInformation clientUgi=UserGroupInformation.loginUserFromKeytabAndReturnUGI("client",keytab.getAbsolutePath());
clientUgi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
final KeyProvider kp=new KMSClientProvider(uri,conf);
// Direct call as the authenticated "client" user.
kp.createKey("kAA",new KeyProvider.Options(conf));
// Proxied call as "foo" -- permitted by the proxyuser config.
UserGroupInformation fooUgi=UserGroupInformation.createRemoteUser("foo");
fooUgi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
Assert.assertNotNull(kp.createKey("kBB",new KeyProvider.Options(conf)));
return null;
}
}
);
// Proxied call as "foo1" -- not in the allowed users list, so it
// must fail with AuthorizationException.
UserGroupInformation foo1Ugi=UserGroupInformation.createRemoteUser("foo1");
foo1Ugi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
kp.createKey("kCC",new KeyProvider.Options(conf));
Assert.fail();
}
catch ( AuthorizationException ex) {
// expected: "foo1" is not a permitted proxy target
}
catch ( Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
}
);
return null;
}
}
);
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Full KMSClientProvider lifecycle against a running KMS: create, read
 * back versions/metadata, roll, list, EEK generate/decrypt, delete, then
 * create keys with various description/attribute combinations and finally
 * fetch a delegation token.
 */
@Test public void testKMSProvider() throws Exception {
Configuration conf=new Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
File confDir=getTestDir();
conf=createBaseKMSConf(confDir);
writeConf(confDir,conf);
runServer(null,null,confDir,new KMSCallable(){
@Override public Void call() throws Exception {
Date started=new Date();
Configuration conf=new Configuration();
URI uri=createKMSUri(getKMSUrl());
KeyProvider kp=new KMSClientProvider(uri,conf);
// A fresh KMS has no keys.
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0,kp.getKeysMetadata().length);
KeyProvider.Options options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
// Create "k1" and read it back by version name and as current key.
KeyProvider.KeyVersion kv0=kp.createKey("k1",options);
Assert.assertNotNull(kv0);
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
KeyProvider.KeyVersion kv1=kp.getKeyVersion(kv0.getVersionName());
Assert.assertEquals(kv0.getVersionName(),kv1.getVersionName());
Assert.assertNotNull(kv1.getMaterial());
KeyProvider.KeyVersion cv1=kp.getCurrentKey("k1");
Assert.assertEquals(kv0.getVersionName(),cv1.getVersionName());
Assert.assertNotNull(cv1.getMaterial());
// Metadata reflects the creation options.
KeyProvider.Metadata m1=kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding",m1.getCipher());
Assert.assertEquals("AES",m1.getAlgorithm());
Assert.assertEquals(128,m1.getBitLength());
Assert.assertEquals(1,m1.getVersions());
Assert.assertNotNull(m1.getCreated());
Assert.assertTrue(started.before(m1.getCreated()));
List lkv1=kp.getKeyVersions("k1");
Assert.assertEquals(1,lkv1.size());
Assert.assertEquals(kv0.getVersionName(),lkv1.get(0).getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// Roll "k1": new version name, and material differs from v0.
KeyProvider.KeyVersion kv2=kp.rollNewVersion("k1");
Assert.assertNotSame(kv0.getVersionName(),kv2.getVersionName());
Assert.assertNotNull(kv2.getMaterial());
kv2=kp.getKeyVersion(kv2.getVersionName());
boolean eq=true;
for (int i=0; i < kv1.getMaterial().length; i++) {
eq=eq && kv1.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertFalse(eq);
// Current key now points at the rolled version, with the same material.
KeyProvider.KeyVersion cv2=kp.getCurrentKey("k1");
Assert.assertEquals(kv2.getVersionName(),cv2.getVersionName());
Assert.assertNotNull(cv2.getMaterial());
eq=true;
for (int i=0; i < kv1.getMaterial().length; i++) {
eq=eq && cv2.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertTrue(eq);
// Both versions are listed, oldest first.
List lkv2=kp.getKeyVersions("k1");
Assert.assertEquals(2,lkv2.size());
Assert.assertEquals(kv1.getVersionName(),lkv2.get(0).getVersionName());
Assert.assertNotNull(lkv2.get(0).getMaterial());
Assert.assertEquals(kv2.getVersionName(),lkv2.get(1).getVersionName());
Assert.assertNotNull(lkv2.get(1).getMaterial());
KeyProvider.Metadata m2=kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding",m2.getCipher());
Assert.assertEquals("AES",m2.getAlgorithm());
Assert.assertEquals(128,m2.getBitLength());
Assert.assertEquals(2,m2.getVersions());
Assert.assertNotNull(m2.getCreated());
Assert.assertTrue(started.before(m2.getCreated()));
List ks1=kp.getKeys();
Assert.assertEquals(1,ks1.size());
Assert.assertEquals("k1",ks1.get(0));
KeyProvider.Metadata[] kms1=kp.getKeysMetadata("k1");
Assert.assertEquals(1,kms1.length);
Assert.assertEquals("AES/CTR/NoPadding",kms1[0].getCipher());
Assert.assertEquals("AES",kms1[0].getAlgorithm());
Assert.assertEquals(128,kms1[0].getBitLength());
Assert.assertEquals(2,kms1[0].getVersions());
Assert.assertNotNull(kms1[0].getCreated());
Assert.assertTrue(started.before(kms1[0].getCreated()));
// EEK round-trip through the crypto extension: deterministic decrypt,
// distinct material for distinct EEKs.
KeyProvider.KeyVersion kv=kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpExt=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName());
Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial());
Assert.assertEquals(kv.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length);
KeyProvider.KeyVersion k1=kpExt.decryptEncryptedKey(ek1);
Assert.assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName());
KeyProvider.KeyVersion k1a=kpExt.decryptEncryptedKey(ek1);
Assert.assertArrayEquals(k1.getMaterial(),k1a.getMaterial());
Assert.assertEquals(kv.getMaterial().length,k1.getMaterial().length);
EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2=kpExt.decryptEncryptedKey(ek2);
boolean isEq=true;
for (int i=0; isEq && i < ek2.getEncryptedKeyVersion().getMaterial().length; i++) {
isEq=k2.getMaterial()[i] == k1.getMaterial()[i];
}
Assert.assertFalse(isEq);
// After deletion every lookup of "k1" returns null / empty.
kp.deleteKey("k1");
Assert.assertNull(kp.getKeyVersion("k1"));
Assert.assertNull(kp.getKeyVersions("k1"));
Assert.assertNull(kp.getMetadata("k1"));
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0,kp.getKeysMetadata().length);
// Metadata combinations: no description/attributes...
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
kp.createKey("k2",options);
KeyProvider.Metadata meta=kp.getMetadata("k2");
Assert.assertNull(meta.getDescription());
Assert.assertTrue(meta.getAttributes().isEmpty());
// ...description only...
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
kp.createKey("k3",options);
meta=kp.getMetadata("k3");
Assert.assertEquals("d",meta.getDescription());
Assert.assertTrue(meta.getAttributes().isEmpty());
// ...attributes only...
Map attributes=new HashMap();
attributes.put("a","A");
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setAttributes(attributes);
kp.createKey("k4",options);
meta=kp.getMetadata("k4");
Assert.assertNull(meta.getDescription());
Assert.assertEquals(attributes,meta.getAttributes());
// ...and both description and attributes.
options=new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
options.setAttributes(attributes);
kp.createKey("k5",options);
meta=kp.getMetadata("k5");
Assert.assertEquals("d",meta.getDescription());
Assert.assertEquals(attributes,meta.getAttributes());
// Delegation tokens: exactly one "kms-dt" token for the KMS address.
KeyProviderDelegationTokenExtension kpdte=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp);
Credentials credentials=new Credentials();
kpdte.addDelegationTokens("foo",credentials);
Assert.assertEquals(1,credentials.getAllTokens().size());
InetSocketAddress kmsAddr=new InetSocketAddress(getKMSUrl().getHost(),getKMSUrl().getPort());
Assert.assertEquals(new Text("kms-dt"),credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind());
return null;
}
}
);
}
InternalCallVerifier BooleanVerifier
@Test public void testCustom(){
  // Build an ACL configuration where each ACL type grants access only to a
  // user named after the type itself (trailing whitespace exercises trimming).
  Configuration aclConf = new Configuration(false);
  for (KMSACLs.Type aclType : KMSACLs.Type.values()) {
    aclConf.set(aclType.getConfigKey(), aclType.toString() + " ");
  }
  KMSACLs kmsAcls = new KMSACLs(aclConf);
  for (KMSACLs.Type aclType : KMSACLs.Type.values()) {
    // The matching user is allowed; an unrelated user ("foo") is denied.
    Assert.assertTrue(kmsAcls.hasAccess(aclType, UserGroupInformation.createRemoteUser(aclType.toString())));
    Assert.assertFalse(kmsAcls.hasAccess(aclType, UserGroupInformation.createRemoteUser("foo")));
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testGetTheStandardDeviation() throws Exception {
  // Run the WordStandardDeviation example job over the shared test input.
  // Array-initializer form replaces the C-style "String args[]" declaration.
  String[] args = { INPUT, STDDEV_OUTPUT };
  WordStandardDeviation wsd = new WordStandardDeviation();
  ToolRunner.run(new Configuration(), wsd, args);
  double stddev = wsd.getStandardDeviation();
  // Cross-check the job's result against an independent local reader.
  WordStdDevReader wr = new WordStdDevReader();
  assertEquals(stddev, wr.read(INPUT), 0.0);
}
InternalCallVerifier EqualityVerifier
@Test public void testGetTheMean() throws Exception {
  // Run the WordMean example job over the shared test input.
  // Array-initializer form replaces the C-style "String args[]" declaration.
  String[] args = { INPUT, MEAN_OUTPUT };
  WordMean wm = new WordMean();
  ToolRunner.run(new Configuration(), wm, args);
  double mean = wm.getMean();
  // Cross-check the job's result against an independent local reader.
  WordMeanReader wr = new WordMeanReader();
  assertEquals(mean, wr.read(INPUT), 0.0);
}
InternalCallVerifier EqualityVerifier
@Test public void testGetTheMedian() throws Exception {
  // Run the WordMedian example job over the shared test input.
  // Array-initializer form replaces the C-style "String args[]" declaration.
  String[] args = { INPUT, MEDIAN_OUTPUT };
  WordMedian wm = new WordMedian();
  ToolRunner.run(new Configuration(), wm, args);
  double median = wm.getMedian();
  // Cross-check the job's result against an independent local reader.
  WordMedianReader wr = new WordMedianReader();
  assertEquals(median, wr.read(INPUT), 0.0);
}
InternalCallVerifier EqualityVerifier
// Exercises the Statistics counters: initial zeros, increments (including an
// increment from another thread), the copy constructor snapshot, and reset().
@Test(timeout=60000) public void testStatisticsOperations() throws Exception {
final Statistics stats=new Statistics("file");
// Freshly constructed Statistics must report zero for all counters.
Assert.assertEquals(0L,stats.getBytesRead());
Assert.assertEquals(0L,stats.getBytesWritten());
Assert.assertEquals(0,stats.getWriteOps());
stats.incrementBytesWritten(1000);
// Only bytesWritten moves; writeOps is untouched by a byte-count increment.
Assert.assertEquals(1000L,stats.getBytesWritten());
Assert.assertEquals(0,stats.getWriteOps());
stats.incrementWriteOps(123);
Assert.assertEquals(123,stats.getWriteOps());
// Increment once more from a separate thread to show counts aggregate
// across threads; join before reading so the update is visible.
Thread thread=new Thread(){
@Override public void run(){
stats.incrementWriteOps(1);
}
}
;
thread.start();
Uninterruptibles.joinUninterruptibly(thread);
Assert.assertEquals(124,stats.getWriteOps());
// Snapshot via the copy constructor, then reset the original.
Statistics stats2=new Statistics(stats);
stats.reset();
// The original is zeroed by reset() ...
Assert.assertEquals(0,stats.getWriteOps());
Assert.assertEquals(0L,stats.getBytesWritten());
Assert.assertEquals(0L,stats.getBytesRead());
// ... while the snapshot keeps the pre-reset values.
Assert.assertEquals(124,stats2.getWriteOps());
Assert.assertEquals(1000L,stats2.getBytesWritten());
Assert.assertEquals(0L,stats2.getBytesRead());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that FileContext's per-scheme statistics track bytes written by
// file creation and bytes read through both read() overloads.
@Test public void testStatistics() throws IOException, URISyntaxException {
URI fsUri=getFsUri();
Statistics stats=FileContext.getStatistics(fsUri);
Assert.assertEquals(0,stats.getBytesRead());
Path filePath=fileContextTestHelper.getTestRootPath(fc,"file1");
createFile(fc,filePath,numBlocks,blockSize);
// Writing the file must not bump the read counter.
Assert.assertEquals(0,stats.getBytesRead());
verifyWrittenBytes(stats);
FSDataInputStream fstr=fc.open(filePath);
byte[] buf=new byte[blockSize];
int bytesRead=fstr.read(buf,0,blockSize);
// Positional read as well, so both read paths contribute to the stats.
fstr.read(0,buf,0,blockSize);
Assert.assertEquals(blockSize,bytesRead);
verifyReadBytes(stats);
verifyWrittenBytes(stats);
// A fresh lookup by URI must return the same accumulated statistics.
verifyReadBytes(FileContext.getStatistics(getFsUri()));
Map statsMap=FileContext.getAllStatistics();
URI exactUri=getSchemeAuthorityUri();
// The all-statistics map is keyed by scheme+authority URI.
verifyWrittenBytes(statsMap.get(exactUri));
fc.delete(filePath,true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testListStatusFilterWithNoMatches() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA2), getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // Listing "test" through the x-filter should select nothing.
  FileStatus[] matches = fSys.listStatus(getTestRootPath(fSys, "test"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // Glob "a??" combined with the x-filter should select exactly the two ax* dirs.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "test/hadoop*" should match only the two directories themselves.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*"));
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop"), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop2"), matches));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies create() semantics on an existing file: plain re-create fails,
// while create(..., overwrite=true, ...) replaces the contents.
@Test public void testOverwrite() throws IOException {
Path path=getTestRootPath(fSys,"test/hadoop/file");
fSys.mkdirs(path.getParent());
createFile(path);
Assert.assertTrue("Exists",exists(fSys,path));
Assert.assertEquals("Length",data.length,fSys.getFileStatus(path).getLen());
// Creating the same file again without overwrite must fail.
try {
createFile(path);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: file already exists
}
// Explicit overwrite=true is allowed and rewrites the file.
FSDataOutputStream out=fSys.create(path,true,4096);
out.write(data,0,data.length);
out.close();
Assert.assertTrue("Exists",exists(fSys,path));
Assert.assertEquals("Length",data.length,fSys.getFileStatus(path).getLen());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// mkdirs must refuse to create a directory anywhere underneath an existing
// file, both for an immediate child and for a deeper descendant path.
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=getTestRootPath(fSys,"test/hadoop");
Assert.assertFalse(exists(fSys,testDir));
fSys.mkdirs(testDir);
Assert.assertTrue(exists(fSys,testDir));
// Place a file where the would-be parent directory path ends.
createFile(getTestRootPath(fSys,"test/hadoop/file"));
Path testSubDir=getTestRootPath(fSys,"test/hadoop/file/subdir");
try {
fSys.mkdirs(testSubDir);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: parent is a file
}
Assert.assertFalse(exists(fSys,testSubDir));
// Same check for a multi-level descendant of the file.
Path testDeepSubDir=getTestRootPath(fSys,"test/hadoop/file/deep/sub/dir");
try {
fSys.mkdirs(testDeepSubDir);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: an ancestor is a file
}
Assert.assertFalse(exists(fSys,testDeepSubDir));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
@Test public void testGetWrappedInputStream() throws IOException {
  // The wrapped stream handle must be obtainable and non-null; the assertion
  // runs after close() exactly as in the original ordering.
  final Path filePath = getTestRootPath(fSys, "test/hadoop/file");
  createFile(filePath);
  FSDataInputStream stream = fSys.open(filePath);
  InputStream wrapped = stream.getWrappedStream();
  stream.close();
  Assert.assertNotNull(wrapped);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Walks the working-directory state machine: absolute set, "." (no-op),
// ".." (parent), relative set, and resolution of relative file operations.
@Test public void testWorkingDirectory() throws Exception {
Path workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// "." must leave the working directory unchanged.
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// ".." must move up to the parent.
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory());
workDir=new Path(getAbsoluteTestRootPath(fSys),new Path("test"));
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// A relative path is resolved against the current working directory.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
absoluteDir=getTestRootPath(fSys,"test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
// File created via an absolute path must be reachable via a relative one,
// and a relative mkdirs must land under the working directory.
Path absolutePath=new Path(absoluteDir,"foo");
createFile(fSys,absolutePath);
fSys.open(new Path("foo")).close();
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(isDir(fSys,new Path(absoluteDir,"newDir")));
}
InternalCallVerifier EqualityVerifier
@Test public void testWDAbsolute() throws IOException {
  // Setting the working directory to a fully-qualified URI path must stick.
  final Path wd = new Path(fSys.getUri() + "/test/existingDir");
  fSys.mkdirs(wd);
  fSys.setWorkingDirectory(wd);
  Assert.assertEquals(wd, fSys.getWorkingDirectory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // Single-character glob "?" matches none of the fixture names.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"), DEFAULT_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "?" matches no fixture, so the filter has nothing to select.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "a??" with the accept-all filter should yield all three distinct dirs.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusNonExistentFile() throws Exception {
  // A literal path that does not exist globs to null ...
  FileStatus[] result = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf"));
  Assert.assertNull(result);
  // ... while wildcard patterns beneath it glob to an empty array.
  result = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/?"));
  Assert.assertEquals(0, result.length);
  result = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/xyz*/?"));
  Assert.assertEquals(0, result.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteEmptyDirectory() throws IOException {
  // A non-recursive delete is sufficient for an empty directory.
  Path emptyDir = getTestRootPath(fSys, "test/hadoop");
  fSys.mkdirs(emptyDir);
  Assert.assertTrue("Dir exists", exists(fSys, emptyDir));
  Assert.assertTrue("Deleted", fSys.delete(emptyDir, false));
  Assert.assertFalse("Dir doesn't exist", exists(fSys, emptyDir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGlobStatusWithNoMatchesInPath() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // Single-character glob "?" matches none of the fixture names.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"));
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testWriteInNonExistentDirectory() throws IOException {
  Path filePath = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fSys, filePath.getParent()));
  // Creating the file must implicitly create the missing parent directories.
  createFile(filePath);
  Assert.assertTrue("Exists", exists(fSys, filePath));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(filePath).getLen());
  Assert.assertTrue("Parent exists", exists(fSys, filePath.getParent()));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Non-recursive delete of a non-empty directory must fail and leave
// everything intact; recursive delete must remove the whole tree.
@Test public void testDeleteRecursively() throws IOException {
Path dir=getTestRootPath(fSys,"test/hadoop");
Path file=getTestRootPath(fSys,"test/hadoop/file");
Path subdir=getTestRootPath(fSys,"test/hadoop/subdir");
createFile(file);
fSys.mkdirs(subdir);
Assert.assertTrue("File exists",exists(fSys,file));
Assert.assertTrue("Dir exists",exists(fSys,dir));
Assert.assertTrue("Subdir exists",exists(fSys,subdir));
// recursive=false on a non-empty directory must throw.
try {
fSys.delete(dir,false);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: directory is not empty
}
// The failed delete must not have removed anything.
Assert.assertTrue("File still exists",exists(fSys,file));
Assert.assertTrue("Dir still exists",exists(fSys,dir));
Assert.assertTrue("Subdir still exists",exists(fSys,subdir));
// recursive=true removes the directory and all of its contents.
Assert.assertTrue("Deleted",fSys.delete(dir,true));
Assert.assertFalse("File doesn't exist",exists(fSys,file));
Assert.assertFalse("Dir doesn't exist",exists(fSys,dir));
Assert.assertFalse("Subdir doesn't exist",exists(fSys,subdir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "*" matches everything; the x-filter then keeps only the two ax* dirs.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatus() throws Exception {
  final Path[] dirs = { getTestRootPath(fSys, "test/hadoop/a"), getTestRootPath(fSys, "test/hadoop/b"), getTestRootPath(fSys, "test/hadoop/c/1") };
  Assert.assertFalse(exists(fSys, dirs[0]));
  for (Path dir : dirs) {
    fSys.mkdirs(dir);
  }
  // "test" contains exactly one entry: the "hadoop" directory.
  FileStatus[] statuses = fSys.listStatus(getTestRootPath(fSys, "test"));
  Assert.assertEquals(1, statuses.length);
  Assert.assertEquals(getTestRootPath(fSys, "test/hadoop"), statuses[0].getPath());
  // "test/hadoop" holds the three children created above.
  statuses = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"));
  Assert.assertEquals(3, statuses.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/a"), statuses));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/b"), statuses));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/c"), statuses));
  // An empty directory lists as a zero-length array, not null.
  statuses = fSys.listStatus(getTestRootPath(fSys, "test/hadoop/a"));
  Assert.assertEquals(0, statuses.length);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testFsStatus() throws Exception {
  // getStatus(null) must return a usable status object whose capacity
  // figures are all non-negative.
  FsStatus status = fSys.getStatus(null);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.getUsed() >= 0);
  Assert.assertTrue(status.getRemaining() >= 0);
  Assert.assertTrue(status.getCapacity() >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AXX) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "*" with the accept-all filter should yield all three distinct dirs.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "ax?" should match exactly the two ax* fixture directories.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/ax?"));
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMkdirs() throws Exception {
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Assert.assertFalse(exists(fSys, dir));
  Assert.assertFalse(isFile(fSys, dir));
  // First mkdirs creates the directory (and its ancestors).
  fSys.mkdirs(dir);
  Assert.assertTrue(exists(fSys, dir));
  Assert.assertFalse(isFile(fSys, dir));
  // A second mkdirs on an existing directory is a no-op, not an error.
  fSys.mkdirs(dir);
  Assert.assertTrue(exists(fSys, dir));
  Assert.assertFalse(isFile(fSys, dir));
  // Ancestors must have been created as directories too.
  Path parent = dir.getParent();
  Assert.assertTrue(exists(fSys, parent));
  Assert.assertFalse(isFile(fSys, parent));
  Path grandparent = parent.getParent();
  Assert.assertTrue(exists(fSys, grandparent));
  Assert.assertFalse(isFile(fSys, grandparent));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // "hadoop*/*" spans both parents, matching all four fixture directories.
  FileStatus[] matches = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*/*"));
  Assert.assertEquals(4, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA2), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatusFilterWithSomeMatches() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fSys, TEST_DIR_AAA), getTestRootPath(fSys, TEST_DIR_AXA), getTestRootPath(fSys, TEST_DIR_AXX), getTestRootPath(fSys, TEST_DIR_AAA2) };
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  // Listing "test/hadoop" with the x-filter keeps only the two ax* dirs.
  FileStatus[] matches = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matches));
}
BranchVerifier InternalCallVerifier BooleanVerifier
@Test public void testCopyToLocalWithUseRawLocalFileSystemOption() throws Exception {
  // copyToLocalFile with useRawLocalFileSystem=true must not produce a
  // checksum (.crc) side file at the destination.
  Configuration conf = new Configuration();
  FileSystem fSys = new RawLocalFileSystem();
  Path fileToFS = new Path(getTestRootDir(), "fs.txt");
  Path fileToLFS = new Path(getTestRootDir(), "test.txt");
  Path crcFileAtLFS = new Path(getTestRootDir(), ".test.txt.crc");
  fSys.initialize(new URI("file:///"), conf);
  try {
    writeFile(fSys, fileToFS);
    // Clear any stale CRC file left over from a previous run.
    if (fSys.exists(crcFileAtLFS)) {
      Assert.assertTrue("CRC files not deleted", fSys.delete(crcFileAtLFS, true));
    }
    fSys.copyToLocalFile(false, fileToFS, fileToLFS, true);
    Assert.assertFalse("CRC files are created", fSys.exists(crcFileAtLFS));
  } finally {
    // Close the locally-constructed FileSystem to avoid leaking the handle.
    fSys.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMkdirRecursiveWithExistingDir() throws IOException {
  // Recursive mkdir must succeed when the directory can be created directly.
  Path dir = getTestRootPath(fc, "aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMkdirRecursiveWithNonExistingDir() throws IOException {
  // With createParent=true, missing ancestors are created along the way.
  Path dir = getTestRootPath(fc, "NonExistant2/aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, true);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMkdirNonRecursiveWithExistingDir() throws IOException {
  // Non-recursive mkdir works when the parent already exists.
  Path dir = getTestRootPath(fc, "aDir");
  fc.mkdir(dir, FileContext.DEFAULT_PERM, false);
  Assert.assertTrue(isDir(fc, dir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AAA2) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // "test/hadoop*" should match only the two directories themselves.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*"));
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop"), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop2"), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMkdirs() throws Exception {
  Path dir = getTestRootPath(fc, "test/hadoop");
  Assert.assertFalse(exists(fc, dir));
  Assert.assertFalse(isFile(fc, dir));
  // First mkdir creates the directory (and its ancestors).
  fc.mkdir(dir, FsPermission.getDefault(), true);
  Assert.assertTrue(exists(fc, dir));
  Assert.assertFalse(isFile(fc, dir));
  // A second mkdir on an existing directory is a no-op, not an error.
  fc.mkdir(dir, FsPermission.getDefault(), true);
  Assert.assertTrue(exists(fc, dir));
  Assert.assertFalse(isFile(fc, dir));
  // Ancestors must have been created as directories too.
  Path parent = dir.getParent();
  Assert.assertTrue(exists(fc, parent));
  Assert.assertFalse(isFile(fc, parent));
  Path grandparent = parent.getParent();
  Assert.assertTrue(exists(fc, grandparent));
  Assert.assertFalse(isFile(fc, grandparent));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AXX) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // Single-character glob "?" matches none of the fixture names.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"), DEFAULT_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testListStatusFilterWithNoMatches() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA2), getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // Listing "test" through the x-filter should select nothing.
  FileStatus[] matches = fc.util().listStatus(getTestRootPath(fc, "test"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AXX) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // "*" matches everything; the x-filter then keeps only the two ax* dirs.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testOpen2() throws IOException {
  // Create a file, re-open it with an explicit buffer size, and verify the
  // full contents round-trip.
  final Path testRoot = getTestRootPath(fc, "test");
  final Path zooPath = new Path(testRoot, "zoo");
  createFile(zooPath);
  final long fileLength = fc.getFileStatus(zooPath).getLen();
  FSDataInputStream in = fc.open(zooPath, 2048);
  try {
    byte[] contents = new byte[(int) fileLength];
    in.readFully(contents);
    assertArrayEquals(data, contents);
  } finally {
    in.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AAA2) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // "hadoop*/*" spans both parents, matching all four fixture directories.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*/*"));
  Assert.assertEquals(4, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA2), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteOnExitUnexisting() throws IOException {
  // deleteOnExit must refuse to register a path that does not exist.
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  boolean registered = fc.deleteOnExit(path);
  // Direct negative assertion instead of assertTrue(!registered).
  Assert.assertFalse(registered);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AXX) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // Glob "a??" combined with the x-filter should select exactly the two ax* dirs.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// On file systems without symlink support: creating a link and querying a
// link target must both throw, and getFileLinkStatus must behave like
// getFileStatus. The whole body is guarded so it is a no-op where symlinks
// ARE supported.
@Test public void testUnsupportedSymlink() throws IOException {
Path file=getTestRootPath(fc,"file");
Path link=getTestRootPath(fc,"linkToFile");
if (!fc.getDefaultFileSystem().supportsSymlinks()) {
try {
fc.createSymlink(file,link,false);
Assert.fail("Created a symlink on a file system that " + "does not support symlinks.");
}
catch ( IOException e) {
// expected: symlinks unsupported
}
createFile(file);
try {
fc.getLinkTarget(file);
Assert.fail("Got a link target on a file system that " + "does not support symlinks.");
}
catch ( IOException e) {
// expected: symlinks unsupported
}
// Without symlinks, link-status and plain status must be identical.
Assert.assertEquals(fc.getFileStatus(file),fc.getFileLinkStatus(file));
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// FileContext variant: non-recursive delete of a non-empty directory must
// fail and leave everything intact; recursive delete removes the whole tree.
@Test public void testDeleteRecursively() throws IOException {
Path dir=getTestRootPath(fc,"test/hadoop");
Path file=getTestRootPath(fc,"test/hadoop/file");
Path subdir=getTestRootPath(fc,"test/hadoop/subdir");
createFile(file);
fc.mkdir(subdir,FsPermission.getDefault(),true);
Assert.assertTrue("File exists",exists(fc,file));
Assert.assertTrue("Dir exists",exists(fc,dir));
Assert.assertTrue("Subdir exists",exists(fc,subdir));
// recursive=false on a non-empty directory must throw.
try {
fc.delete(dir,false);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: directory is not empty
}
// The failed delete must not have removed anything.
Assert.assertTrue("File still exists",exists(fc,file));
Assert.assertTrue("Dir still exists",exists(fc,dir));
Assert.assertTrue("Subdir still exists",exists(fc,subdir));
// recursive=true removes the directory and all of its contents.
Assert.assertTrue("Deleted",fc.delete(dir,true));
Assert.assertFalse("File doesn't exist",exists(fc,file));
Assert.assertFalse("Dir doesn't exist",exists(fc,dir));
Assert.assertFalse("Subdir doesn't exist",exists(fc,subdir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGetFileContext1() throws IOException {
  // Build a second FileContext from the default AbstractFileSystem and verify
  // a path created through it resolves back to the same location.
  final Path rootPath = getTestRootPath(fc, "test");
  final AbstractFileSystem defaultFs = fc.getDefaultFileSystem();
  final FileContext derivedFc = FileContext.getFileContext(defaultFs);
  final Path zooPath = new Path(rootPath, "zoo");
  FSDataOutputStream out = derivedFc.create(zooPath, EnumSet.of(CREATE), Options.CreateOpts.createParent());
  out.close();
  Path resolved = derivedFc.resolvePath(zooPath);
  assertEquals(resolved.toUri().getPath(), zooPath.toUri().getPath());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AXX) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // "*" with the accept-all filter should yield all three distinct dirs.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testWriteInNonExistentDirectory() throws IOException {
  Path filePath = getTestRootPath(fc, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fc, filePath.getParent()));
  // Creating the file must implicitly create the missing parent directories.
  createFile(filePath);
  Assert.assertTrue("Exists", exists(fc, filePath));
  Assert.assertEquals("Length", data.length, fc.getFileStatus(filePath).getLen());
  Assert.assertTrue("Parent exists", exists(fc, filePath.getParent()));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusNonExistentFile() throws Exception {
  // A literal path that does not exist globs to null ...
  FileStatus[] result = fc.util().globStatus(getTestRootPath(fc, "test/hadoopfsdf"));
  Assert.assertNull(result);
  // ... while wildcard patterns beneath it glob to an empty array.
  result = fc.util().globStatus(getTestRootPath(fc, "test/hadoopfsdf/?"));
  Assert.assertEquals(0, result.length);
  result = fc.util().globStatus(getTestRootPath(fc, "test/hadoopfsdf/xyz*/?"));
  Assert.assertEquals(0, result.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
  // Lay out the fixture directories, creating them only on first use.
  final Path[] dirs = { getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA), getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AXX) };
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  // "a??" with the accept-all filter should yield all three distinct dirs.
  FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, matches.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matches));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matches));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSetVerifyChecksum() throws IOException {
  // Write a file with checksum verification enabled, then read it back and
  // confirm the contents round-trip intact.
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE), Options.CreateOpts.createParent());
  try {
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  }
  finally {
    out.close();
  }
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  // assertEquals gives a useful failure message, unlike assertTrue(a == b).
  assertEquals(data.length, len);
  byte[] bb = new byte[(int) len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    // A single read() may legally return fewer bytes than requested; loop
    // until the buffer is filled so the comparison below is meaningful.
    int off = 0;
    while (off < bb.length) {
      int n = fsdis.read(bb, off, bb.length - off);
      assertTrue("Unexpected EOF while reading back " + path, n >= 0);
      off += n;
    }
  }
  finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter() throws Exception {
  // The single-character glob combined with TEST_X_FILTER matches nothing.
  final Path[] dirs = {getTestRootPath(fc, TEST_DIR_AAA), getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX), getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  final FileStatus[] matches = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"), TEST_X_FILTER);
  Assert.assertEquals(0, matches.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteEmptyDirectory() throws IOException {
  // An empty directory can be deleted without the recursive flag.
  final Path emptyDir = getTestRootPath(fc, "test/hadoop");
  fc.mkdir(emptyDir, FsPermission.getDefault(), true);
  Assert.assertTrue("Dir exists", exists(fc, emptyDir));
  Assert.assertTrue("Deleted", fc.delete(emptyDir, false));
  Assert.assertFalse("Dir doesn't exist", exists(fc, emptyDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Glob "ax?" must match exactly the two TEST_DIR_AX* directories and skip
// the others.
@Test public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AAA2)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/ax?"));
Assert.assertEquals(2,paths.length);
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXA),paths));
Assert.assertTrue(containsPath(getTestRootPath(fc,TEST_DIR_AXX),paths));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises setWorkingDirectory/getWorkingDirectory: absolute and relative
// targets, "." and ".." resolution, and failure on missing paths and on
// regular files. The statement order is significant throughout.
@Test public void testWorkingDirectory() throws Exception {
Path workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// "cd ." must leave the working directory unchanged.
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// "cd .." moves to the parent.
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fc.getWorkingDirectory());
workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
fc.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fc.getWorkingDirectory());
// A relative target resolves against the current working directory.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
absoluteDir=getTestRootPath(fc,"test/existingDir2");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
// Relative open/mkdir calls resolve against the new working directory.
Path absolutePath=new Path(absoluteDir,"foo");
fc.create(absolutePath,EnumSet.of(CREATE)).close();
fc.open(new Path("foo")).close();
fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
// cd into a non-existent directory must fail.
absoluteDir=getTestRootPath(fc,"nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
}
catch ( Exception e) {
// expected
}
// The working directory may also live under the local FS root.
absoluteDir=new Path(localFsRootPath,"existingDir");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
// cd onto a regular file must fail with an IOException.
Path aRegularFile=new Path("aRegularFile");
createFile(aRegularFile);
try {
fc.setWorkingDirectory(aRegularFile);
fail("An IOException expected.");
}
catch ( IOException ioe) {
// expected
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Glob "?" matches nothing because every test directory name is longer
// than a single character; expects an empty (non-null) result.
@Test public void testGlobStatusWithNoMatchesInPath() throws Exception {
Path[] testDirs={getTestRootPath(fc,TEST_DIR_AAA),getTestRootPath(fc,TEST_DIR_AXA),getTestRootPath(fc,TEST_DIR_AXX),getTestRootPath(fc,TEST_DIR_AAA2)};
if (exists(fc,testDirs[0]) == false) {
for ( Path path : testDirs) {
fc.mkdir(path,FsPermission.getDefault(),true);
}
}
FileStatus[] paths=fc.util().globStatus(getTestRootPath(fc,"test/hadoop/?"));
Assert.assertEquals(0,paths.length);
}
TestCleaner InternalCallVerifier BooleanVerifier HybridVerifier
// Removes the per-test "test" subtree and the local-FS scratch root after
// each test so tests do not see each other's files.
@After public void tearDown() throws Exception {
boolean del=fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test")),true);
assertTrue(del);
// NOTE(review): result of this delete is intentionally unchecked —
// presumably a best-effort cleanup of the local root; confirm.
fc.delete(localFsRootPath,true);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// mkdir must refuse to create a directory underneath an existing file,
// both for a direct child and for a deeper descendant.
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=getTestRootPath(fc,"test/hadoop");
Assert.assertFalse(exists(fc,testDir));
fc.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc,testDir));
createFile(getTestRootPath(fc,"test/hadoop/file"));
Path testSubDir=getTestRootPath(fc,"test/hadoop/file/subdir");
try {
fc.mkdir(testSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: direct parent is a file
}
Assert.assertFalse(exists(fc,testSubDir));
Path testDeepSubDir=getTestRootPath(fc,"test/hadoop/file/deep/sub/dir");
try {
fc.mkdir(testDeepSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: an ancestor is a file
}
Assert.assertFalse(exists(fc,testDeepSubDir));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testFsStatus() throws Exception {
  // A null path asks for the status of the default file system; all three
  // counters must be non-negative.
  final FsStatus status = fc.getFsStatus(null);
  Assert.assertNotNull(status);
  Assert.assertTrue(status.getCapacity() >= 0);
  Assert.assertTrue(status.getRemaining() >= 0);
  Assert.assertTrue(status.getUsed() >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatus() throws Exception {
  // Verifies both the array-returning util().listStatus and the
  // iterator-returning fc.listStatus APIs over the same directory tree.
  Path[] testDirs = {getTestRootPath(fc, "test/hadoop/a"),
      getTestRootPath(fc, "test/hadoop/b"),
      getTestRootPath(fc, "test/hadoop/c/1")};
  Assert.assertFalse(exists(fc, testDirs[0]));
  for (Path path : testDirs) {
    fc.mkdir(path, FsPermission.getDefault(), true);
  }
  FileStatus[] paths = fc.util().listStatus(getTestRootPath(fc, "test"));
  Assert.assertEquals(1, paths.length);
  Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), paths[0].getPath());
  paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop"));
  Assert.assertEquals(3, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"), paths));
  paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop/a"));
  Assert.assertEquals(0, paths.length);
  // Parameterized iterator: the raw RemoteIterator forced an unchecked
  // call and does not compile against the typed next() uses below.
  RemoteIterator<FileStatus> pathsIterator = fc.listStatus(getTestRootPath(fc, "test"));
  Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), pathsIterator.next().getPath());
  Assert.assertFalse(pathsIterator.hasNext());
  pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop"));
  FileStatus[] subdirs = new FileStatus[3];
  int i = 0;
  while (i < 3 && pathsIterator.hasNext()) {
    subdirs[i++] = pathsIterator.next();
  }
  Assert.assertFalse(pathsIterator.hasNext());
  // assertEquals reports the actual count on failure, unlike assertTrue.
  Assert.assertEquals(3, i);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"), subdirs));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"), subdirs));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"), subdirs));
  pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a"));
  Assert.assertFalse(pathsIterator.hasNext());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testListCorruptFileBlocks() throws IOException {
  // listCorruptFileBlocks is optional: supporting file systems return an
  // iterator; others must throw UnsupportedOperationException.
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  createFile(path);
  try {
    // Typed iterator: the raw RemoteIterator does not compile against the
    // Path assignment from next() below.
    final RemoteIterator<Path> remoteIterator = fc.listCorruptFileBlocks(path);
    if (listCorruptedBlocksSupported()) {
      assertTrue(remoteIterator != null);
      Path p;
      while (remoteIterator.hasNext()) {
        p = remoteIterator.next();
        System.out.println("corrupted block: " + p);
      }
      try {
        // An exhausted iterator must refuse further next() calls.
        remoteIterator.next();
        fail();
      }
      catch (NoSuchElementException nsee) {
        // expected
      }
    }
    else {
      // Unsupported implementations must not have returned normally.
      fail();
    }
  }
  catch (UnsupportedOperationException uoe) {
    if (listCorruptedBlocksSupported()) {
      fail(uoe.toString());
    }
    // else: expected for file systems without corrupt-block reporting
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testSetOwner() throws IOException {
  // Changes a file's group to each available group and verifies it, and
  // checks that setOwner(null, null) is rejected. Skipped on Windows.
  if (Path.WINDOWS) {
    System.out.println("Cannot run test for Windows");
    return;
  }
  String filename = "bar";
  Path f = fileContextTestHelper.getTestRootPath(fc, filename);
  createFile(fc, f);
  // Parameterized list: the raw List does not compile against the String
  // assignments from groups.get(...) below.
  List<String> groups = null;
  try {
    groups = getGroups();
    System.out.println(filename + ": " + fc.getFileStatus(f).getPermission());
  }
  catch (IOException e) {
    // Environment cannot supply groups — skip rather than fail.
    System.out.println(StringUtils.stringifyException(e));
    System.out.println("Cannot run test");
    return;
  }
  if (groups == null || groups.size() < 1) {
    System.out.println("Cannot run test: need at least one group. groups=" + groups);
    return;
  }
  try {
    String g0 = groups.get(0);
    fc.setOwner(f, null, g0);
    Assert.assertEquals(g0, fc.getFileStatus(f).getGroup());
    if (groups.size() > 1) {
      String g1 = groups.get(1);
      fc.setOwner(f, null, g1);
      Assert.assertEquals(g1, fc.getFileStatus(f).getGroup());
    }
    else {
      System.out.println("Not testing changing the group since user " + "belongs to only one group.");
    }
    try {
      // Owner and group both null is an invalid request.
      fc.setOwner(f, null, null);
      fail("Exception expected.");
    }
    catch (IllegalArgumentException iae) {
      // expected
    }
  }
  finally {
    cleanupFile(fc, f);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Directories created via fc1 must be visible to and removable via fc2,
// including names containing spaces and special characters.
@Test public void testDeleteDirectory() throws IOException {
String dirName="dirTest";
Path testDirPath=qualifiedPath(dirName,fc2);
Assert.assertFalse(exists(fc2,testDirPath));
fc1.mkdir(testDirPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testDirPath));
Assert.assertTrue(isDir(fc2,testDirPath));
// NOTE(review): this delete's return value is ignored, unlike the
// asserted deletes in the loop below — confirm that is intentional.
fc2.delete(testDirPath,true);
Assert.assertFalse(isDir(fc2,testDirPath));
String dirNames[]={"deleteTest/testDir","deleteTest/test Dir","deleteTest/test*Dir","deleteTest/test#Dir","deleteTest/test1234","deleteTest/1234Test","deleteTest/test)Dir","deleteTest/test_DIr","deleteTest/()&^%$#@!~_+}{>"," ","^ "};
for ( String f : dirNames) {
// Skip names that are not legal on the current platform.
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
Assert.assertFalse(exists(fc2,testPath));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(isDir(fc2,testPath));
Assert.assertTrue(fc2.delete(testPath,true));
Assert.assertFalse(exists(fc2,testPath));
Assert.assertFalse(isDir(fc2,testPath));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteFile() throws IOException {
  // A file created through fc1 must be visible to and deletable via fc2.
  Path testPath = qualifiedPath("testFile", fc2);
  Assert.assertFalse(exists(fc2, testPath));
  createFile(fc1, testPath);
  Assert.assertTrue(exists(fc2, testPath));
  // Assert the delete result instead of discarding it, consistent with the
  // sibling delete tests in this class.
  Assert.assertTrue(fc2.delete(testPath, false));
  Assert.assertFalse(exists(fc2, testPath));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteNonExistingDirectory() throws IOException {
  // delete() returns false while the target is absent, true exactly once
  // after it has been created, then false again.
  final Path dirPath = qualifiedPath("testFile", fc2);
  Assert.assertFalse(exists(fc2, dirPath));
  Assert.assertFalse(fc2.delete(dirPath, false));
  fc1.mkdir(dirPath, FsPermission.getDefault(), true);
  Assert.assertTrue(exists(fc2, dirPath));
  Assert.assertTrue(fc2.delete(dirPath, false));
  Assert.assertFalse(exists(fc2, dirPath));
  Assert.assertFalse(fc2.delete(dirPath, false));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteNonExistingFile() throws IOException {
  // delete() on an absent file returns false; once the file exists it
  // succeeds a single time and then returns false again.
  final Path filePath = qualifiedPath("testFile", fc2);
  Assert.assertFalse(exists(fc2, filePath));
  Assert.assertFalse(fc2.delete(filePath, false));
  createFile(fc1, filePath);
  Assert.assertTrue(exists(fc2, filePath));
  Assert.assertTrue(fc2.delete(filePath, false));
  Assert.assertFalse(exists(fc2, filePath));
  Assert.assertFalse(fc2.delete(filePath, false));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// getFsStatus on a concrete path must report positive capacity, remaining
// space and usage once a file exists there.
@Test public void testFileStatus() throws IOException {
String fileName="file1";
Path path2=fc2.makeQualified(new Path(BASE,fileName));
createFile(fc1,path2);
FsStatus fc2Status=fc2.getFsStatus(path2);
Assert.assertNotNull(fc2Status);
Assert.assertTrue(fc2Status.getCapacity() > 0);
Assert.assertTrue(fc2Status.getRemaining() > 0);
Assert.assertTrue(fc2Status.getUsed() > 0);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testModificationTime() throws IOException {
  // Both FileContext instances must report the same modification time for
  // the same underlying file.
  final Path testPath = qualifiedPath("file1", fc2);
  createFile(fc1, testPath);
  final long viaFc1 = fc1.getFileStatus(testPath).getModificationTime();
  final long viaFc2 = fc2.getFileStatus(testPath).getModificationTime();
  Assert.assertEquals(viaFc1, viaFc2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testDeleteNonExistingFileInDir() throws IOException {
  // Same contract as testDeleteNonExistingFile, but for a file nested in
  // directories that are created on demand.
  final Path nestedPath = qualifiedPath("testDir/testDir/TestFile", fc2);
  Assert.assertFalse(exists(fc2, nestedPath));
  Assert.assertFalse(fc2.delete(nestedPath, false));
  createFile(fc1, nestedPath);
  Assert.assertTrue(exists(fc2, nestedPath));
  Assert.assertTrue(fc2.delete(nestedPath, false));
  Assert.assertFalse(exists(fc2, nestedPath));
  Assert.assertFalse(fc2.delete(nestedPath, false));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCreateFileInNonExistingDirectory() throws IOException {
  // Creating the file must implicitly create the missing parent directory.
  final Path filePath = qualifiedPath("testDir/testFile", fc2);
  Assert.assertFalse(exists(fc2, filePath));
  createFile(fc1, filePath);
  final Path parent = filePath.getParent();
  Assert.assertTrue(isDir(fc2, parent));
  Assert.assertEquals("testDir", parent.getName());
  Assert.assertTrue(exists(fc2, filePath));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatus() throws Exception {
  // Cross-context listing: directories created via fc1 are listed with
  // both the array and the iterator APIs, including special-char names.
  final String hPrefix = "test/hadoop";
  final String[] dirs = {hPrefix + "/a", hPrefix + "/b", hPrefix + "/c",
      hPrefix + "/1", hPrefix + "/#@#@", hPrefix + "/&*#$#$@234"};
  // Parameterized collection: the raw ArrayList does not compile against
  // the Path-typed uses of testDirs.get(...) below.
  List<Path> testDirs = new ArrayList<Path>();
  for (String d : dirs) {
    // Skip names that are not legal on the current platform.
    if (!isTestableFileNameOnPlatform(d)) {
      continue;
    }
    testDirs.add(qualifiedPath(d, fc2));
  }
  Assert.assertFalse(exists(fc1, testDirs.get(0)));
  for (Path path : testDirs) {
    fc1.mkdir(path, FsPermission.getDefault(), true);
  }
  FileStatus[] paths = fc1.util().listStatus(qualifiedPath("test", fc1));
  Assert.assertEquals(1, paths.length);
  Assert.assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());
  paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
  Assert.assertEquals(testDirs.size(), paths.length);
  for (int i = 0; i < testDirs.size(); i++) {
    boolean found = false;
    for (int j = 0; j < paths.length; j++) {
      if (qualifiedPath(testDirs.get(i).toString(), fc1).equals(paths[j].getPath())) {
        found = true;
      }
    }
    Assert.assertTrue(testDirs.get(i) + " not found", found);
  }
  paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
  Assert.assertEquals(0, paths.length);
  // Typed iterator: the raw RemoteIterator does not compile against the
  // FileStatus uses of next() below.
  RemoteIterator<FileStatus> pathsItor = fc1.listStatus(qualifiedPath("test", fc1));
  Assert.assertEquals(qualifiedPath(hPrefix, fc1), pathsItor.next().getPath());
  Assert.assertFalse(pathsItor.hasNext());
  pathsItor = fc1.listStatus(qualifiedPath(hPrefix, fc1));
  int dirLen = 0;
  for (; pathsItor.hasNext(); dirLen++) {
    boolean found = false;
    FileStatus stat = pathsItor.next();
    for (int j = 0; j < dirs.length; j++) {
      if (qualifiedPath(dirs[j], fc1).equals(stat.getPath())) {
        found = true;
        break;
      }
    }
    Assert.assertTrue(stat.getPath() + " not found", found);
  }
  Assert.assertEquals(testDirs.size(), dirLen);
  pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
  Assert.assertFalse(pathsItor.hasNext());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Cross-context variant: mkdir via either fc1 or fc2 must refuse to
// create a directory underneath a file created through the other context.
@Test public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
Path testDir=qualifiedPath("test/hadoop",fc2);
Assert.assertFalse(exists(fc2,testDir));
fc2.mkdir(testDir,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testDir));
createFile(fc1,qualifiedPath("test/hadoop/file",fc2));
Path testSubDir=qualifiedPath("test/hadoop/file/subdir",fc2);
try {
fc1.mkdir(testSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: direct parent is a file
}
Assert.assertFalse(exists(fc1,testSubDir));
Path testDeepSubDir=qualifiedPath("test/hadoop/file/deep/sub/dir",fc1);
try {
fc2.mkdir(testDeepSubDir,FsPermission.getDefault(),true);
Assert.fail("Should throw IOException.");
}
catch ( IOException e) {
// expected: an ancestor is a file
}
Assert.assertFalse(exists(fc1,testDeepSubDir));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Directory creation via fc1 must be visible through fc2, be idempotent,
// implicitly create ancestors, and accept special-character names.
@Test public void testCreateDirectory() throws IOException {
Path path=qualifiedPath("test/hadoop",fc2);
Path falsePath=qualifiedPath("path/doesnot.exist",fc2);
Path subDirPath=qualifiedPath("dir0",fc2);
Assert.assertFalse(exists(fc1,path));
Assert.assertFalse(isFile(fc1,path));
Assert.assertFalse(isDir(fc1,path));
fc1.mkdir(path,FsPermission.getDefault(),true);
Assert.assertTrue(isDir(fc2,path));
Assert.assertTrue(exists(fc2,path));
Assert.assertFalse(isFile(fc2,path));
// Repeated mkdir of the same path must be a harmless no-op.
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
fc1.mkdir(subDirPath,FsPermission.getDefault(),true);
// Ancestors of the created dir must exist as directories.
Path parentDir=path.getParent();
Assert.assertTrue(exists(fc2,parentDir));
Assert.assertFalse(isFile(fc2,parentDir));
Path grandparentDir=parentDir.getParent();
Assert.assertTrue(exists(fc2,grandparentDir));
Assert.assertFalse(isFile(fc2,grandparentDir));
// falsePath was never created and must not be reported as existing.
Assert.assertFalse(exists(fc2,falsePath));
Assert.assertFalse(isDir(fc2,falsePath));
String dirNames[]={"createTest/testDir","createTest/test Dir","deleteTest/test*Dir","deleteTest/test#Dir","deleteTest/test1234","deleteTest/test_DIr","deleteTest/1234Test","deleteTest/test)Dir","deleteTest/()&^%$#@!~_+}{>"," ","^ "};
for ( String f : dirNames) {
// Skip names that are not legal on the current platform.
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
Path testPath=qualifiedPath(f,fc2);
Assert.assertFalse(exists(fc2,testPath));
fc1.mkdir(testPath,FsPermission.getDefault(),true);
Assert.assertTrue(exists(fc2,testPath));
Assert.assertTrue(isDir(fc2,testPath));
}
}
InternalCallVerifier BooleanVerifier
// util().copy on a directory must recursively copy the files inside it,
// byte-for-byte.
@Test public void testRecursiveFcCopy() throws Exception {
final String ts="some random text";
Path dir1=fileContextTestHelper.getTestRootPath(fc,"dir1");
Path dir2=fileContextTestHelper.getTestRootPath(fc,"dir2");
Path file1=new Path(dir1,"file1");
fc.mkdir(dir1,null,false);
writeFile(fc,file1,ts.getBytes());
assertTrue(fc.util().exists(file1));
// file2 is the expected location of file1's copy under dir2.
Path file2=new Path(dir2,"file1");
fc.util().copy(dir1,dir2);
assertTrue("Failed to copy file2 ",fc.util().exists(file2));
assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),readFile(fc,file2,ts.getBytes().length)));
}
InternalCallVerifier BooleanVerifier
@Test public void testFcCopy() throws Exception {
  final String ts = "some random text";
  // Copy a single file and verify the destination exists with identical
  // contents.
  final Path src = fileContextTestHelper.getTestRootPath(fc, "file1");
  final Path dst = fileContextTestHelper.getTestRootPath(fc, "file2");
  final byte[] payload = ts.getBytes();
  writeFile(fc, src, payload);
  assertTrue(fc.util().exists(src));
  fc.util().copy(src, dst);
  assertTrue("Failed to copy file2 ", fc.util().exists(dst));
  assertTrue("Copied files does not match ", Arrays.equals(payload, readFile(fc, dst, payload.length)));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renaming a directory onto a symlink-to-file must fail even with
// OVERWRITE, leaving both the source dir and the link in place.
@Test(timeout=10000) public void testRenameDirToSymlinkToFile() throws IOException {
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir2(),"file");
Path linkToFile=new Path(testBaseDir2(),"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,linkToFile,false);
try {
wrapper.rename(dir1,linkToFile,Rename.OVERWRITE);
fail("Renamed directory to a symlink");
}
catch ( IOException e) {
// expected
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir1));
assertTrue(wrapper.exists(linkToFile));
}
InternalCallVerifier EqualityVerifier
// A relative link target beginning with ".." must be stored verbatim and
// still resolve correctly relative to the link's directory.
@Test(timeout=10000) public void testCreateLinkToDotDotPrefix() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path dir=new Path(testBaseDir1(),"test");
Path link=new Path(testBaseDir1(),"test/link");
createAndWriteFile(file);
wrapper.mkdir(dir,FsPermission.getDefault(),false);
wrapper.setWorkingDirectory(dir);
wrapper.createSymlink(new Path("../file"),link,false);
// The link must be readable and report the unresolved "../file" target.
readFile(link);
assertEquals(new Path("../file"),wrapper.getLinkTarget(link));
}
InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testCreateLinkToDirectory() throws IOException {
  // A symlink to a directory reports as a directory via stat, and as a
  // symlink via lstat-style getFileLinkStatus.
  final Path targetDir = new Path(testBaseDir1());
  final Path fileInDir = new Path(testBaseDir1(), "file");
  final Path linkToDir = new Path(testBaseDir2(), "linkToDir");
  createAndWriteFile(fileInDir);
  wrapper.createSymlink(targetDir, linkToDir, false);
  assertFalse(wrapper.isFile(linkToDir));
  assertTrue(wrapper.isDir(linkToDir));
  assertTrue(wrapper.getFileStatus(linkToDir).isDirectory());
  assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Renaming a symlink onto the directory it points to must fail (with and
// without OVERWRITE) and leave both the link and its target intact.
@Test(timeout=10000) public void testRenameSymlinkToDirItLinksTo() throws IOException {
// Skipped for the local "file" scheme.
if ("file".equals(getScheme())) {
return;
}
Path dir=new Path(testBaseDir1(),"dir");
Path link=new Path(testBaseDir1(),"linkToDir");
wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(dir,link,false);
try {
wrapper.rename(link,dir);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
// expected
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
assertTrue(wrapper.isDir(dir));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(dir,wrapper.getLinkTarget(link));
try {
wrapper.rename(link,dir,Rename.OVERWRITE);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
// expected even with OVERWRITE
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
assertTrue(wrapper.isDir(dir));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(dir,wrapper.getLinkTarget(link));
}
InternalCallVerifier EqualityVerifier
@Test(timeout=10000) public void testSetWDNotResolvesLinks() throws IOException {
  // Setting the working directory to a symlink must keep the link name
  // rather than resolving it to the target.
  final Path target = new Path(testBaseDir1());
  final Path link = new Path(testBaseDir1() + "/link");
  wrapper.createSymlink(target, link, false);
  wrapper.setWorkingDirectory(link);
  assertEquals(link.getName(), wrapper.getWorkingDirectory().getName());
}
UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
// Renaming a symlink moves the link itself; the target stays readable and
// the new name still resolves to it.
@Test(timeout=10000) public void testRenameSymlink() throws IOException {
// Windows symlink emulation does not preserve link-ness across rename.
assumeTrue(!emulatingSymlinksOnWindows());
Path file=new Path(testBaseDir1(),"file");
Path link1=new Path(testBaseDir1(),"linkToFile1");
Path link2=new Path(testBaseDir1(),"linkToFile2");
createAndWriteFile(file);
wrapper.createSymlink(file,link1,false);
wrapper.rename(link1,link2);
assertTrue(wrapper.getFileLinkStatus(link2).isSymlink());
assertFalse(wrapper.getFileStatus(link2).isDirectory());
readFile(link2);
readFile(file);
try {
// Creating at link2 must fail: the renamed link now occupies the name.
createAndWriteFile(link2);
fail("link was not renamed");
}
catch ( IOException x) {
// expected
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renaming a directory onto a dangling symlink must fail even with
// OVERWRITE; the directory and the (still dangling) link survive.
@Test(timeout=10000) public void testRenameDirToDanglingSymlink() throws IOException {
Path dir=new Path(testBaseDir1());
Path link=new Path(testBaseDir2(),"linkToFile");
wrapper.createSymlink(new Path("/doesNotExist"),link,false);
try {
wrapper.rename(dir,link,Rename.OVERWRITE);
fail("Renamed directory to a symlink");
}
catch ( IOException e) {
// expected
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(dir));
// lstat must still see the link even though its target doesn't exist.
assertTrue(wrapper.getFileLinkStatus(link) != null);
}
BranchVerifier InternalCallVerifier EqualityVerifier
// setTimes applied to a symlink must modify the target file's times
// (mtime=2, atime=3) while the link's own access time stays unchanged.
@Test(timeout=10000) public void testSetTimes() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,link,false);
long at=wrapper.getFileLinkStatus(link).getAccessTime();
wrapper.setTimes(link,2L,3L);
// NOTE(review): assertions skipped for the local "file" scheme —
// presumably because local symlink time semantics differ; confirm.
if (!"file".equals(getScheme())) {
assertEquals(at,wrapper.getFileLinkStatus(link).getAccessTime());
assertEquals(3,wrapper.getFileStatus(file).getAccessTime());
assertEquals(2,wrapper.getFileStatus(file).getModificationTime());
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A fully-qualified link target is stored as-is, so moving the link's
// directory leaves the link dangling (target path no longer exists).
@Test(timeout=10000) public void testCreateLinkUsingFullyQualPaths() throws IOException {
Path fileAbs=new Path(testBaseDir1(),"file");
Path linkAbs=new Path(testBaseDir1(),"linkToFile");
Path fileQual=new Path(testURI().toString(),fileAbs);
Path linkQual=new Path(testURI().toString(),linkAbs);
createAndWriteFile(fileAbs);
wrapper.createSymlink(fileQual,linkQual,false);
checkLink(linkAbs,"file".equals(getScheme()) ? fileAbs : fileQual,fileQual);
// Rename the whole base dir; the link moves but its qualified target
// string still points at the old location.
Path dir1=new Path(testBaseDir1());
Path dir2=new Path(testBaseDir2());
Path linkViaDir2=new Path(testBaseDir2(),"linkToFile");
wrapper.rename(dir1,dir2,Rename.OVERWRITE);
assertEquals(fileQual,wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
try {
readFile(linkViaDir2);
fail("The target should not exist");
}
catch ( FileNotFoundException x) {
// expected: the link is now dangling
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// createSymlink with createParent=false must fail when the link's parent
// dir is missing; with createParent=true it creates the parent and link.
@Test(timeout=10000) public void testCreateLinkCanCreateParent() throws IOException {
Path file=new Path(testBaseDir1() + "/file");
Path link=new Path(testBaseDir2() + "/linkToFile");
createAndWriteFile(file);
// Remove the would-be parent so the first attempt has nowhere to go.
wrapper.delete(new Path(testBaseDir2()),true);
try {
wrapper.createSymlink(file,link,false);
fail("Created link without first creating parent dir");
}
catch ( IOException x) {
// expected: parent missing and createParent == false
}
assertFalse(wrapper.exists(new Path(testBaseDir2())));
wrapper.createSymlink(file,link,true);
readFile(link);
}
InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testRenameSymlinkNonExistantDest() throws IOException {
  // Renaming a symlink to a fresh name moves the link itself; the target
  // file and its contents are untouched and the old name disappears.
  final Path target = new Path(testBaseDir1(), "file");
  final Path oldLink = new Path(testBaseDir1(), "linkToFile1");
  final Path newLink = new Path(testBaseDir1(), "linkToFile2");
  createAndWriteFile(target);
  wrapper.createSymlink(target, oldLink, false);
  wrapper.rename(oldLink, newLink);
  assertTrue(wrapper.getFileLinkStatus(newLink).isSymlink() || emulatingSymlinksOnWindows());
  readFile(newLink);
  readFile(target);
  assertFalse(wrapper.exists(oldLink));
}
InternalCallVerifier BooleanVerifier
// Renaming a file onto a dangling symlink: without OVERWRITE the attempt
// may fail; with OVERWRITE the link is replaced by the regular file.
@Test(timeout=10000) public void testRenameFileToDanglingSymlink() throws IOException {
// Skipped for the local "file" scheme.
if ("file".equals(getScheme())) {
return;
}
Path file1=new Path(testBaseDir1(),"file1");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file1);
wrapper.createSymlink(new Path("/doesNotExist"),link,false);
try {
wrapper.rename(file1,link);
}
catch ( IOException e) {
// a failure without OVERWRITE is acceptable here
}
wrapper.rename(file1,link,Rename.OVERWRITE);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(link));
// The destination is now a plain file, not a symlink.
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Renaming a symlink onto the file it points to must fail (with and
// without OVERWRITE) and leave both the link and its target intact.
@Test(timeout=10000) public void testRenameSymlinkToFileItLinksTo() throws IOException {
// Skipped for the local "file" scheme.
if ("file".equals(getScheme())) {
return;
}
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,link,false);
try {
wrapper.rename(link,file);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
// expected
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
assertTrue(wrapper.isFile(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(file,wrapper.getLinkTarget(link));
try {
wrapper.rename(link,file,Rename.OVERWRITE);
fail("Renamed symlink to its target");
}
catch ( IOException e) {
// expected even with OVERWRITE
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
assertTrue(wrapper.isFile(file));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isSymlink(link));
assertEquals(file,wrapper.getLinkTarget(link));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renaming a file onto an existing symlink requires OVERWRITE; the result
// is a regular file, not a symlink.
@Test(timeout=10000) public void testRenameFileToSymlinkToFile() throws IOException {
Path file1=new Path(testBaseDir1(),"file1");
Path file2=new Path(testBaseDir1(),"file2");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file1);
createAndWriteFile(file2);
wrapper.createSymlink(file2,link,false);
try {
wrapper.rename(file1,link);
fail("Renamed file to symlink w/o overwrite");
}
catch ( IOException e) {
// expected: destination exists and OVERWRITE was not given
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(file1,link,Rename.OVERWRITE);
assertFalse(wrapper.exists(file1));
assertTrue(wrapper.exists(link));
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
InternalCallVerifier BooleanVerifier
// Renaming a symlink addressed through another symlink renames the real
// link; the file it points at is untouched.
@Test(timeout=10000) public void testRenameSymlinkViaSymlink() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"link");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
// Both paths below address "link" through the dir symlink linkToDir.
Path linkViaLink=new Path(linkToDir,"link");
Path linkNewViaLink=new Path(linkToDir,"linkNew");
createAndWriteFile(file);
wrapper.createSymlink(file,link,false);
wrapper.createSymlink(baseDir,linkToDir,false);
wrapper.rename(linkViaLink,linkNewViaLink);
assertFalse(wrapper.exists(linkViaLink));
assertTrue(wrapper.exists(file));
assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink() || emulatingSymlinksOnWindows());
readFile(linkNewViaLink);
}
InternalCallVerifier BooleanVerifier
@Test(timeout=10000) public void testListStatusUsingLink() throws IOException {
  // Listing through a symlink to a directory enumerates that directory's
  // entries; 2 or 3 are accepted depending on whether the FS includes the
  // link itself in the listing.
  Path file = new Path(testBaseDir1(), "file");
  Path link = new Path(testBaseDir1(), "link");
  createAndWriteFile(file);
  wrapper.createSymlink(new Path(testBaseDir1()), link, false);
  FileStatus[] stats = wrapper.listStatus(link);
  assertTrue(stats.length == 2 || stats.length == 3);
  // Typed iterator instead of the raw (unchecked) RemoteIterator.
  RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(link);
  int dirLen = 0;
  while (statsItor.hasNext()) {
    statsItor.next();
    dirLen++;
  }
  // The iterator API must agree with the array API on the entry count.
  assertTrue(dirLen == 2 || dirLen == 3);
}
BranchVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
// stat vs lstat semantics for a link to a file: getFileStatus follows the
// link, getFileLinkStatus reports the link itself.
@Test(timeout=10000) public void testStatLinkToFile() throws IOException {
// Windows symlink emulation does not provide real link status.
assumeTrue(!emulatingSymlinksOnWindows());
Path file=new Path(testBaseDir1() + "/file");
Path linkToFile=new Path(testBaseDir1() + "/linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(file,linkToFile,false);
assertFalse(wrapper.getFileLinkStatus(linkToFile).isDirectory());
assertTrue(wrapper.isSymlink(linkToFile));
assertTrue(wrapper.isFile(linkToFile));
assertFalse(wrapper.isDir(linkToFile));
assertEquals(file,wrapper.getLinkTarget(linkToFile));
// NOTE(review): the path-equality assertions are skipped for the local
// "file" scheme — presumably its status objects differ; confirm.
if (!"file".equals(getScheme())) {
assertEquals(wrapper.getFileStatus(file),wrapper.getFileStatus(linkToFile));
assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(linkToFile).getPath());
assertEquals(wrapper.makeQualified(linkToFile),wrapper.getFileLinkStatus(linkToFile).getPath());
}
}
InternalCallVerifier EqualityVerifier
// A link whose target is "test/.." (i.e. the base dir itself) must still
// allow reaching files beneath the base dir through the link.
@Test(timeout=10000) public void testCreateLinkToDotDot() throws IOException {
Path file=new Path(testBaseDir1(),"test/file");
Path dotDot=new Path(testBaseDir1(),"test/..");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"test/file");
// Sanity: "base/test/.." resolves to the base dir.
assertEquals(new Path(testBaseDir1()),dotDot);
createAndWriteFile(file);
wrapper.createSymlink(dotDot,linkToDir,false);
readFile(fileViaLink);
assertEquals(fileSize,wrapper.getFileStatus(fileViaLink).getLen());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates, lists, and deletes a directory through a symlink to its parent;
 * all operations must resolve the link, and deleting via the link removes
 * the real directory.
 */
@Test(timeout=10000) public void testAccessDirViaSymlink() throws IOException {
  Path baseDir = new Path(testBaseDir1());
  Path dir = new Path(testBaseDir1(), "dir");
  Path linkToDir = new Path(testBaseDir2(), "linkToDir");
  Path dirViaLink = new Path(linkToDir, "dir");
  wrapper.createSymlink(baseDir, linkToDir, false);
  wrapper.mkdir(dirViaLink, FileContext.DEFAULT_PERM, true);
  assertTrue(wrapper.getFileStatus(dirViaLink).isDirectory());
  // The new directory is empty, via both listing APIs.
  FileStatus[] stats = wrapper.listStatus(dirViaLink);
  assertEquals(0, stats.length);
  // Parameterized iterator instead of the raw RemoteIterator type.
  RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(dirViaLink);
  assertFalse(statsItor.hasNext());
  wrapper.delete(dirViaLink, false);
  assertFalse(wrapper.exists(dirViaLink));
  // Deleting through the link removed the underlying directory too.
  assertFalse(wrapper.exists(dir));
}
InternalCallVerifier BooleanVerifier
/**
 * Renaming a directory addressed through a symlinked parent renames the
 * underlying directory itself.
 */
@Test(timeout=10000) public void testRenameDirViaSymlink() throws IOException {
  final Path base = new Path(testBaseDir1());
  final Path realDir = new Path(base, "dir");
  final Path link = new Path(testBaseDir2(), "linkToDir");
  final Path oldViaLink = new Path(link, "dir");
  final Path newViaLink = new Path(link, "dirNew");
  wrapper.mkdir(realDir, FileContext.DEFAULT_PERM, false);
  wrapper.createSymlink(base, link, false);
  assertTrue(wrapper.exists(oldViaLink));
  wrapper.rename(oldViaLink, newViaLink);
  // The old names are gone -- both via the link and directly.
  assertFalse(wrapper.exists(oldViaLink));
  assertFalse(wrapper.exists(realDir));
  assertTrue(wrapper.exists(newViaLink));
}
InternalCallVerifier EqualityVerifier
// Accesses a file through a symlink whose target was fully qualified with
// makeQualified at creation time.
@Test(timeout=10000) public void testAccessFileViaInterSymlinkQualTarget() throws IOException {
Path baseDir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"file");
wrapper.createSymlink(wrapper.makeQualified(baseDir),linkToDir,false);
createAndWriteFile(fileViaLink);
// For a plain file, FileStatus and FileLinkStatus are identical -- whether
// addressed directly or through the link.
assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink));
readFile(fileViaLink);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renaming a directory onto an existing symlink-to-directory must fail even
// with OVERWRITE, leaving both the source dir and the link intact.
@Test(timeout=10000) public void testRenameDirToSymlinkToDir() throws IOException {
Path dir1=new Path(testBaseDir1());
Path subDir=new Path(testBaseDir2(),"subDir");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(subDir,linkToDir,false);
try {
wrapper.rename(dir1,linkToDir,Rename.OVERWRITE);
fail("Renamed directory to a symlink");
}
catch ( IOException e) {
// unwrapException peels any wrapping; any IOException subtype is accepted.
assertTrue(unwrapException(e) instanceof IOException);
}
// Nothing was moved or destroyed by the failed rename.
assertTrue(wrapper.exists(dir1));
assertTrue(wrapper.exists(linkToDir));
}
InternalCallVerifier BooleanVerifier
/**
 * A symlink whose target does not exist must still stat (via link status)
 * as a symlink, and never as a directory.
 */
@Test(timeout=10000) public void testStatDanglingLink() throws IOException {
  final Path missingTarget = new Path("/noSuchFile");
  final Path danglingLink = new Path(testBaseDir1() + "/link");
  wrapper.createSymlink(missingTarget, danglingLink, false);
  assertFalse(wrapper.getFileLinkStatus(danglingLink).isDirectory());
  assertTrue(wrapper.getFileLinkStatus(danglingLink).isSymlink());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Stats a symlink to a directory: the resolved view is a directory, the
 * unresolved (link) view is a symlink, and the recorded target matches.
 * A duplicated isDir assertion from the original was removed.
 */
@Test(timeout=10000) public void testStatLinkToDir() throws IOException {
  Path dir = new Path(testBaseDir1());
  Path linkToDir = new Path(testBaseDir1() + "/linkToDir");
  wrapper.createSymlink(dir, linkToDir, false);
  // Resolved view: a directory, not a symlink.
  assertFalse(wrapper.getFileStatus(linkToDir).isSymlink());
  assertTrue(wrapper.isDir(linkToDir));
  // Unresolved view: a symlink, not a directory.
  assertFalse(wrapper.getFileLinkStatus(linkToDir).isDirectory());
  assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
  assertFalse(wrapper.isFile(linkToDir));
  assertEquals(dir, wrapper.getLinkTarget(linkToDir));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Creates a symlink with an absolute target path, then renames the directory
// containing the link: the absolute target does not travel with the link,
// so the link becomes dangling.
@Test(timeout=10000) public void testCreateLinkUsingAbsPaths() throws IOException {
Path fileAbs=new Path(testBaseDir1() + "/file");
Path linkAbs=new Path(testBaseDir1() + "/linkToFile");
Path schemeAuth=new Path(testURI().toString());
Path fileQual=new Path(schemeAuth,testBaseDir1() + "/file");
createAndWriteFile(fileAbs);
wrapper.createSymlink(fileAbs,linkAbs,false);
checkLink(linkAbs,fileAbs,fileQual);
Path dir1=new Path(testBaseDir1());
Path dir2=new Path(testBaseDir2());
Path linkViaDir2=new Path(testBaseDir2(),"linkToFile");
// Move the whole directory; the link still records the old absolute target.
wrapper.rename(dir1,dir2,Rename.OVERWRITE);
assertEquals(fileQual,wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
try {
readFile(linkViaDir2);
fail("The target should not exist");
}
catch ( FileNotFoundException x) {
// expected: the link dangles after the rename
}
}
InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
// Accesses a file through a symlink whose target ("dir") is relative to the
// link's parent directory.
@Test(timeout=10000) public void testAccessFileViaInterSymlinkRelTarget() throws IOException {
// NOTE(review): the "file" scheme is excluded -- presumably relative link
// targets behave differently on the local FS; confirm.
assumeTrue(!"file".equals(getScheme()));
Path dir=new Path(testBaseDir1(),"dir");
Path file=new Path(dir,"file");
Path linkToDir=new Path(testBaseDir1(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"file");
wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(new Path("dir"),linkToDir,false);
createAndWriteFile(fileViaLink);
// Direct and via-link views of the file agree on path and status.
assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(file).getPath());
assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink));
assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(file));
}
InternalCallVerifier BooleanVerifier
// A symlink to a symlink to a directory resolves transitively: a file under
// the doubly-linked path is a plain, readable file.
@Test(timeout=10000) public void testCreateLinkToLink() throws IOException {
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path linkToLink=new Path(testBaseDir2(),"linkToLink");
Path fileViaLink=new Path(testBaseDir2(),"linkToLink/file");
createAndWriteFile(file);
wrapper.createSymlink(dir1,linkToDir,false);
wrapper.createSymlink(linkToDir,linkToLink,false);
assertTrue(wrapper.isFile(fileViaLink));
assertFalse(wrapper.isDir(fileViaLink));
// The fully resolved path is the file itself, not a link or directory.
assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
readFile(fileViaLink);
}
InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
// Stats a symlink whose target was given as a bare relative path ("file").
@Test(timeout=10000) public void testStatRelLinkToFile() throws IOException {
// NOTE(review): excluded for the "file" scheme -- presumably relative
// targets are qualified differently on the local FS; confirm.
assumeTrue(!"file".equals(getScheme()));
Path file=new Path(testBaseDir1(),"file");
Path linkToFile=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(new Path("file"),linkToFile,false);
// Resolved status matches the target; reported paths are fully qualified.
assertEquals(wrapper.getFileStatus(file),wrapper.getFileStatus(linkToFile));
assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(linkToFile).getPath());
assertEquals(wrapper.makeQualified(linkToFile),wrapper.getFileLinkStatus(linkToFile).getPath());
}
InternalCallVerifier BooleanVerifier
/**
 * Renaming a file addressed through a symlinked directory renames the
 * underlying file itself.
 */
@Test(timeout=10000) public void testRenameFileViaSymlink() throws IOException {
  final Path base = new Path(testBaseDir1());
  final Path realFile = new Path(testBaseDir1(), "file");
  final Path link = new Path(testBaseDir2(), "linkToDir");
  final Path oldViaLink = new Path(link, "file");
  final Path newViaLink = new Path(link, "fileNew");
  createAndWriteFile(realFile);
  wrapper.createSymlink(base, link, false);
  wrapper.rename(oldViaLink, newViaLink);
  // The old name is gone both through the link and directly.
  assertFalse(wrapper.exists(oldViaLink));
  assertFalse(wrapper.exists(realFile));
  assertTrue(wrapper.exists(newViaLink));
}
InternalCallVerifier EqualityVerifier
/**
 * Creates a symlink using paths relative to the working directory, then
 * renames the containing directory; the relative target travels with the
 * link, so the file stays readable through it.
 */
@Test(timeout=10000) public void testCreateLinkUsingRelPaths() throws IOException {
  Path fileAbs = new Path(testBaseDir1(), "file");
  Path linkAbs = new Path(testBaseDir1(), "linkToFile");
  Path schemeAuth = new Path(testURI().toString());
  Path fileQual = new Path(schemeAuth, testBaseDir1() + "/file");
  createAndWriteFile(fileAbs);
  // Both the target and the link are relative to the working directory.
  wrapper.setWorkingDirectory(new Path(testBaseDir1()));
  wrapper.createSymlink(new Path("file"), new Path("linkToFile"), false);
  checkLink(linkAbs, new Path("file"), fileQual);
  Path dir1 = new Path(testBaseDir1());
  Path dir2 = new Path(testBaseDir2());
  Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
  Path fileViaDir2 = new Path(schemeAuth, testBaseDir2() + "/file");
  wrapper.rename(dir1, dir2, Rename.OVERWRITE);
  // Unused 'stats' local removed; the call is kept only to verify listing
  // the renamed directory does not throw.
  wrapper.listStatus(dir2);
  // The relative target now resolves under dir2.
  assertEquals(fileViaDir2, wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
  readFile(linkViaDir2);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renaming a file onto an existing directory reached via a symlink must
// fail and leave the source file untouched.
@Test(timeout=10000) public void testRenameFileToDestViaSymlink() throws IOException {
Path dir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path subDir=new Path(linkToDir,"subDir");
createAndWriteFile(file);
wrapper.createSymlink(dir,linkToDir,false);
wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false);
try {
wrapper.rename(file,subDir);
fail("Renamed file to a directory");
}
catch ( IOException e) {
// Any (possibly wrapped) IOException is acceptable here.
assertTrue(unwrapException(e) instanceof IOException);
}
assertTrue(wrapper.exists(file));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Renaming a file onto a symlink-to-directory: fails without OVERWRITE,
// succeeds with OVERWRITE, replacing the link with the file.
@Test(timeout=10000) public void testRenameFileToSymlinkToDir() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path subDir=new Path(testBaseDir1(),"subDir");
Path link=new Path(testBaseDir1(),"link");
wrapper.mkdir(subDir,FileContext.DEFAULT_PERM,false);
wrapper.createSymlink(subDir,link,false);
createAndWriteFile(file);
try {
wrapper.rename(file,link);
fail("Renamed file to symlink w/o overwrite");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(file,link,Rename.OVERWRITE);
assertFalse(wrapper.exists(file));
assertTrue(wrapper.exists(link));
// The destination is now a plain file, no longer a symlink.
assertTrue(wrapper.isFile(link));
assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
}
InternalCallVerifier BooleanVerifier
/**
 * Renames where the destination's parent is a symlink: they succeed while
 * the link points at a directory, and fail once it points at a (missing or
 * existing) file. fail() calls were added to the exception-expected blocks,
 * which previously passed silently if no exception was thrown.
 */
@Test(timeout=10000) public void testRenameFileWithDestParentSymlink() throws IOException {
  Path link = new Path(testBaseDir1(), "link");
  Path file1 = new Path(testBaseDir1(), "file1");
  Path file2 = new Path(testBaseDir1(), "file2");
  Path file3 = new Path(link, "file3");
  Path dir2 = new Path(testBaseDir2());
  // While the link targets a directory, renames through it work both ways.
  wrapper.createSymlink(dir2, link, false);
  createAndWriteFile(file1);
  wrapper.rename(file1, file3);
  assertFalse(wrapper.exists(file1));
  assertTrue(wrapper.exists(file3));
  wrapper.rename(file3, file1);
  // Repoint the link at file2, which does not exist yet.
  wrapper.delete(link, false);
  wrapper.createSymlink(file2, link, false);
  try {
    wrapper.rename(file1, file3);
    fail("Renamed a file whose destination parent is a dangling link");
  } catch (IOException e) {
    assertTrue(unwrapException(e) instanceof FileNotFoundException);
  }
  // Now file2 exists: the destination parent resolves to a file.
  createAndWriteFile(file2);
  try {
    wrapper.rename(file1, file3);
    fail("Renamed a file whose destination parent is a link to a file");
  } catch (IOException e) {
    assertTrue(unwrapException(e) instanceof ParentNotDirectoryException);
  }
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Renaming a symlink onto an existing file: fails without OVERWRITE; with
// OVERWRITE the destination becomes the link (still pointing at file2).
@Test(timeout=10000) public void testRenameSymlinkToExistingFile() throws IOException {
Path file1=new Path(testBaseDir1(),"file");
Path file2=new Path(testBaseDir1(),"someFile");
Path link=new Path(testBaseDir1(),"linkToFile");
createAndWriteFile(file1);
createAndWriteFile(file2);
wrapper.createSymlink(file2,link,false);
try {
wrapper.rename(link,file1);
fail("Renamed w/o passing overwrite");
}
catch ( IOException e) {
assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
}
wrapper.rename(link,file1,Rename.OVERWRITE);
assertFalse(wrapper.exists(link));
// NOTE(review): emulated symlinks on Windows evidently don't preserve link
// status after a rename, hence the guard.
if (!emulatingSymlinksOnWindows()) {
assertTrue(wrapper.getFileLinkStatus(file1).isSymlink());
assertEquals(file2,wrapper.getLinkTarget(file1));
}
}
InternalCallVerifier BooleanVerifier
/**
 * mkdir through a symlinked parent creates the real directory, and deleting
 * via the link removes it.
 */
@Test(timeout=10000) public void testCreateDirViaSymlink() throws IOException {
  final Path base = new Path(testBaseDir1());
  final Path realSubDir = new Path(testBaseDir1(), "subDir");
  final Path link = new Path(testBaseDir2(), "linkToDir");
  final Path subDirViaLink = new Path(link, "subDir");
  wrapper.createSymlink(base, link, false);
  wrapper.mkdir(subDirViaLink, FileContext.DEFAULT_PERM, true);
  assertTrue(wrapper.isDir(subDirViaLink));
  wrapper.delete(subDirViaLink, false);
  // Gone via the link and gone for real.
  assertFalse(wrapper.exists(subDirViaLink));
  assertFalse(wrapper.exists(realSubDir));
}
InternalCallVerifier BooleanVerifier
/**
 * Creating a file through a symlinked directory yields a plain, readable
 * file that can be deleted through the link as well.
 */
@Test(timeout=10000) public void testCreateFileViaSymlink() throws IOException {
  final Path targetDir = new Path(testBaseDir1());
  final Path link = new Path(testBaseDir2(), "linkToDir");
  final Path fileViaLink = new Path(link, "file");
  wrapper.createSymlink(targetDir, link, false);
  createAndWriteFile(fileViaLink);
  // The via-link path resolves to an ordinary file.
  assertTrue(wrapper.isFile(fileViaLink));
  assertFalse(wrapper.isDir(fileViaLink));
  assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
  assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
  readFile(fileViaLink);
  wrapper.delete(fileViaLink, true);
  assertFalse(wrapper.exists(fileViaLink));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testLinkStatusAndTargetWithNonLink() throws IOException {
Path schemeAuth=new Path(testURI().toString());
Path dir=new Path(testBaseDir1());
Path dirQual=new Path(schemeAuth,dir.toString());
Path file=new Path(testBaseDir1(),"file");
Path fileQual=new Path(schemeAuth,file.toString());
createAndWriteFile(file);
assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file));
assertEquals(wrapper.getFileStatus(dir),wrapper.getFileLinkStatus(dir));
try {
wrapper.getLinkTarget(file);
fail("Get link target on non-link should throw an IOException");
}
catch ( IOException x) {
assertEquals("Path " + fileQual + " is not a symbolic link",x.getMessage());
}
try {
wrapper.getLinkTarget(dir);
fail("Get link target on non-link should throw an IOException");
}
catch ( IOException x) {
assertEquals("Path " + dirQual + " is not a symbolic link",x.getMessage());
}
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Creates a symlink whose target has an authority but no scheme
 * ("//auth/path"); stat returns the target verbatim, but reading through
 * the link fails because no filesystem exists for a null scheme.
 * The early "file"-scheme return was converted to assumeTrue for
 * consistency with the sibling tests (reports a skip, not a pass).
 */
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath2() throws IOException {
  assumeTrue(!"file".equals(getScheme()));
  Path link = new Path(testBaseDir1(), "linkToFile");
  Path fileWoScheme = new Path("//" + testURI().getAuthority() + testBaseDir1() + "/file");
  wrapper.createSymlink(fileWoScheme, link, false);
  // The stored target is returned verbatim, scheme-less.
  assertEquals(fileWoScheme, wrapper.getLinkTarget(link));
  assertEquals(fileWoScheme.toString(), wrapper.getFileLinkStatus(link).getSymlink().toString());
  try {
    readFile(link);
    fail("Accessed a file with w/o scheme");
  } catch (IOException e) {
    // The two wrapper implementations report the missing scheme differently.
    if (wrapper instanceof FileContextTestWrapper) {
      assertEquals("No AbstractFileSystem for scheme: null", e.getMessage());
    } else if (wrapper instanceof FileSystemTestWrapper) {
      assertEquals("No FileSystem for scheme: null", e.getMessage());
    }
  }
}
InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
// Creates a symlink whose target path itself traverses another symlink.
@Test(timeout=10000) public void testCreateLinkViaLink() throws IOException {
assumeTrue(!emulatingSymlinksOnWindows());
Path dir1=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path linkToDir=new Path(testBaseDir2(),"linkToDir");
Path fileViaLink=new Path(linkToDir,"file");
Path linkToFile=new Path(linkToDir,"linkToFile");
createAndWriteFile(file);
wrapper.createSymlink(dir1,linkToDir,false);
wrapper.createSymlink(fileViaLink,linkToFile,false);
// Data access resolves all the way to the file...
assertTrue(wrapper.isFile(linkToFile));
assertTrue(wrapper.getFileLinkStatus(linkToFile).isSymlink());
readFile(linkToFile);
assertEquals(fileSize,wrapper.getFileStatus(linkToFile).getLen());
// ...but the recorded target is the unresolved via-link path.
assertEquals(fileViaLink,wrapper.getLinkTarget(linkToFile));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end access of a file through a symlink with an absolute target:
 * create, stat, read, append, rename, block locations, checksum, delete.
 * A duplicated isDir assertion from the original was removed.
 */
@Test(timeout=10000) public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {
  Path baseDir = new Path(testBaseDir1());
  Path file = new Path(testBaseDir1(), "file");
  Path fileNew = new Path(baseDir, "fileNew");
  Path linkToDir = new Path(testBaseDir2(), "linkToDir");
  Path fileViaLink = new Path(linkToDir, "file");
  Path fileNewViaLink = new Path(linkToDir, "fileNew");
  wrapper.createSymlink(baseDir, linkToDir, false);
  createAndWriteFile(fileViaLink);
  // The via-link path resolves to an ordinary file.
  assertTrue(wrapper.exists(fileViaLink));
  assertTrue(wrapper.isFile(fileViaLink));
  assertFalse(wrapper.isDir(fileViaLink));
  assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
  assertEquals(wrapper.getFileStatus(file), wrapper.getFileLinkStatus(file));
  assertEquals(wrapper.getFileStatus(fileViaLink), wrapper.getFileLinkStatus(fileViaLink));
  readFile(fileViaLink);
  appendToFile(fileViaLink);
  wrapper.rename(fileViaLink, fileNewViaLink);
  assertFalse(wrapper.exists(fileViaLink));
  assertTrue(wrapper.exists(fileNewViaLink));
  readFile(fileNewViaLink);
  // Direct and via-link views agree on block locations and checksum.
  assertEquals(wrapper.getFileBlockLocations(fileNew, 0, 1).length,
      wrapper.getFileBlockLocations(fileNewViaLink, 0, 1).length);
  assertEquals(wrapper.getFileChecksum(fileNew), wrapper.getFileChecksum(fileNewViaLink));
  wrapper.delete(fileNewViaLink, true);
  assertFalse(wrapper.exists(fileNewViaLink));
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
// Creates a symlink whose target has a scheme but no authority
// ("scheme:///path"): stat works, but reading must fail since DFS requires
// an authority with the scheme.
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath1() throws IOException {
assumeTrue(!"file".equals(getScheme()));
Path schemeAuth=new Path(testURI().toString());
Path fileWoHost=new Path(getScheme() + "://" + testBaseDir1()+ "/file");
Path link=new Path(testBaseDir1() + "/linkToFile");
Path linkQual=new Path(schemeAuth,testBaseDir1() + "/linkToFile");
FSTestWrapper localWrapper=wrapper.getLocalFSWrapper();
wrapper.createSymlink(fileWoHost,link,false);
// The stored target is returned verbatim, still host-less.
assertEquals(fileWoHost,wrapper.getLinkTarget(linkQual));
assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(link).getSymlink().toString());
assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(linkQual).getSymlink().toString());
if (wrapper instanceof FileContextTestWrapper) {
// FileContext can also stat the qualified link through the local wrapper.
assertEquals(fileWoHost.toString(),localWrapper.getFileLinkStatus(linkQual).getSymlink().toString());
}
try {
readFile(link);
fail("DFS requires URIs with schemes have an authority");
}
catch ( java.lang.RuntimeException e) {
// FileContext surfaces the failure as a RuntimeException...
assertTrue(wrapper instanceof FileContextTestWrapper);
}
catch ( FileNotFoundException e) {
// ...while FileSystem falls through to a missing-file error.
assertTrue(wrapper instanceof FileSystemTestWrapper);
GenericTestUtils.assertExceptionContains("File does not exist: /test1/file",e);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Overwriting a file through the raw filesystem leaves the old checksum
 * file stale; a verified read must then fail with ChecksumException, while
 * an unverified read returns the new data. The original asserted inside a
 * finally block, which could mask an unexpected exception; restructured to
 * fail() inside the try instead.
 */
@Test public void testCorruptedChecksum() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testCorruptChecksum");
  Path checksumPath = localFs.getChecksumFile(testPath);
  // Write through the checksummed FS so a checksum file is produced.
  FSDataOutputStream out = localFs.create(testPath, true);
  out.write("testing 1 2 3".getBytes());
  out.close();
  assertTrue(localFs.exists(checksumPath));
  FileStatus stat = localFs.getFileStatus(checksumPath);
  // Overwrite the data through the raw FS: the checksum file is untouched.
  out = localFs.getRawFileSystem().create(testPath, true);
  out.write("testing stale checksum".getBytes());
  out.close();
  assertTrue(localFs.exists(checksumPath));
  assertEquals(stat, localFs.getFileStatus(checksumPath));
  // A verified read must now detect the stale checksum.
  localFs.setVerifyChecksum(true);
  try {
    readFile(localFs, testPath, 1024);
    fail("did not throw a ChecksumException for a stale checksum file");
  } catch (ChecksumException expected) {
    // expected
  }
  // With verification off, the new contents are readable.
  localFs.setVerifyChecksum(false);
  String str = readFile(localFs, testPath, 1024);
  assertEquals("testing stale checksum", str);
}
InternalCallVerifier BooleanVerifier
/**
 * open() wraps the stream in an FSInputChecker exactly when checksum
 * verification is enabled.
 */
@Test public void testStreamType() throws Exception {
  final Path streamPath = new Path(TEST_ROOT_DIR, "testStreamType");
  localFs.create(streamPath).close();
  localFs.setVerifyChecksum(true);
  FSDataInputStream stream = localFs.open(streamPath);
  assertTrue("stream is input checker", stream.getWrappedStream() instanceof FSInputChecker);
  localFs.setVerifyChecksum(false);
  stream = localFs.open(streamPath);
  assertFalse("stream is not input checker", stream.getWrappedStream() instanceof FSInputChecker);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test to ensure that if the checksum file is truncated, a
 * ChecksumException is thrown; disabling verification then allows the
 * (intact) data to be read.
 */
@Test public void testTruncatedChecksum() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testtruncatedcrc");
  FSDataOutputStream fout = localFs.create(testPath);
  fout.write("testing truncation".getBytes());
  fout.close();
  // Read the checksum file through the raw FS and rewrite it one byte short.
  Path checksumFile = localFs.getChecksumFile(testPath);
  FileSystem rawFs = localFs.getRawFileSystem();
  FSDataInputStream checksumStream = rawFs.open(checksumFile);
  byte buf[] = new byte[8192];
  int read = checksumStream.read(buf, 0, buf.length);
  checksumStream.close();
  FSDataOutputStream replaceStream = rawFs.create(checksumFile);
  replaceStream.write(buf, 0, read - 1);
  replaceStream.close();
  try {
    readFile(localFs, testPath, 1024);
    fail("Did not throw a ChecksumException when reading truncated " + "crc file");
  } catch (ChecksumException ie) {
    // expected
  }
  // With verification off the data itself is still readable.
  localFs.setVerifyChecksum(false);
  // readFile returns a String; the original's redundant toString() and
  // assertTrue(equals) were replaced with a direct assertEquals.
  String str = readFile(localFs, testPath, 1024);
  assertEquals("read", "testing truncation", str);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Reads at a range of buffer sizes verify cleanly; swapping in the checksum
 * file of a different file causes a ChecksumException, which disabling
 * verification bypasses.
 */
@Test public void testVerifyChecksum() throws Exception {
  Path testPath = new Path(TEST_ROOT_DIR, "testPath");
  Path testPath11 = new Path(TEST_ROOT_DIR, "testPath11");
  FSDataOutputStream fout = localFs.create(testPath);
  fout.write("testing".getBytes());
  fout.close();
  fout = localFs.create(testPath11);
  fout.write("testing you".getBytes());
  fout.close();
  // Buffer sizes straddling 512/1024 -- presumably the checksum chunk
  // boundaries; all must verify cleanly.
  readFile(localFs, testPath, 128);
  readFile(localFs, testPath, 511);
  readFile(localFs, testPath, 512);
  readFile(localFs, testPath, 513);
  readFile(localFs, testPath, 1023);
  readFile(localFs, testPath, 1024);
  readFile(localFs, testPath, 1025);
  // Replace testPath's checksum file with testPath11's.
  localFs.delete(localFs.getChecksumFile(testPath), true);
  assertFalse("checksum deleted", localFs.exists(localFs.getChecksumFile(testPath)));
  FileUtil.copy(localFs, localFs.getChecksumFile(testPath11), localFs,
      localFs.getChecksumFile(testPath), false, true, localFs.getConf());
  assertTrue("checksum exists", localFs.exists(localFs.getChecksumFile(testPath)));
  boolean errorRead = false;
  try {
    readFile(localFs, testPath, 1024);
  } catch (ChecksumException ie) {
    errorRead = true;
  }
  assertTrue("error reading", errorRead);
  // With verification off the mismatched checksum is ignored.
  localFs.setVerifyChecksum(false);
  // readFile returns a String; redundant toString() dropped and
  // assertTrue(equals) replaced with assertEquals for better failures.
  String str = readFile(localFs, testPath, 1024);
  assertEquals("read", "testing", str);
}
InternalCallVerifier EqualityVerifier
/**
 * CommandFormat.parse must only consume options from the given start index:
 * index 0 treats everything as positional, index 1 consumes "-a" but stops
 * at the non-option "b", index 2 consumes nothing.
 * Raw List replaced with List&lt;String&gt;.
 */
@Test public void testOldArgsWithIndex() {
  String[] arrayArgs = new String[]{"ignore", "-a", "b", "-c"};
  {
    // From index 0: "ignore" is not an option, so nothing is consumed.
    CommandFormat cf = new CommandFormat(0, 9, "a", "c");
    List<String> parsedArgs = cf.parse(arrayArgs, 0);
    assertEquals(setOf(), cf.getOpts());
    assertEquals(listOf("ignore", "-a", "b", "-c"), parsedArgs);
  }
  {
    // From index 1: "-a" is consumed; "b" stops option parsing, so "-c" stays.
    CommandFormat cf = new CommandFormat(0, 9, "a", "c");
    List<String> parsedArgs = cf.parse(arrayArgs, 1);
    assertEquals(setOf("a"), cf.getOpts());
    assertEquals(listOf("b", "-c"), parsedArgs);
  }
  {
    // From index 2: "b" is first, so no options are consumed at all.
    CommandFormat cf = new CommandFormat(0, 9, "a", "c");
    List<String> parsedArgs = cf.parse(arrayArgs, 2);
    assertEquals(setOf(), cf.getOpts());
    assertEquals(listOf("b", "-c"), parsedArgs);
  }
}
InternalCallVerifier EqualityVerifier
// ContentSummary.readFields must consume exactly six longs in wire order:
// length, fileCount, directoryCount, quota, spaceConsumed, spaceQuota.
@Test public void testReadFields() throws IOException {
long length=11111;
long fileCount=22222;
long directoryCount=33333;
long quota=44444;
long spaceConsumed=55555;
long spaceQuota=66666;
ContentSummary contentSummary=new ContentSummary();
DataInput in=mock(DataInput.class);
// Successive readLong() calls return the six field values in order.
when(in.readLong()).thenReturn(length).thenReturn(fileCount).thenReturn(directoryCount).thenReturn(quota).thenReturn(spaceConsumed).thenReturn(spaceQuota);
contentSummary.readFields(in);
// Each getter must reflect the corresponding deserialized value.
assertEquals("getLength",length,contentSummary.getLength());
assertEquals("getFileCount",fileCount,contentSummary.getFileCount());
assertEquals("getDirectoryCount",directoryCount,contentSummary.getDirectoryCount());
assertEquals("getQuota",quota,contentSummary.getQuota());
assertEquals("getSpaceConsumed",spaceConsumed,contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota",spaceQuota,contentSummary.getSpaceQuota());
}
InternalCallVerifier EqualityVerifier
/**
 * The quota-less ContentSummary constructor reports -1 for both quotas and
 * reuses the length as the space consumed.
 */
@Test public void testConstructorNoQuota() {
  final long len = 11111;
  final long files = 22222;
  final long dirs = 33333;
  ContentSummary summary = new ContentSummary(len, files, dirs);
  assertEquals("getLength", len, summary.getLength());
  assertEquals("getFileCount", files, summary.getFileCount());
  assertEquals("getDirectoryCount", dirs, summary.getDirectoryCount());
  assertEquals("getQuota", -1, summary.getQuota());
  assertEquals("getSpaceConsumed", len, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", -1, summary.getSpaceQuota());
}
InternalCallVerifier EqualityVerifier
/** The full six-argument ContentSummary constructor stores every field verbatim. */
@Test public void testConstructorWithQuota() {
  final long len = 11111;
  final long files = 22222;
  final long dirs = 33333;
  final long nsQuota = 44444;
  final long consumed = 55555;
  final long dsQuota = 66666;
  ContentSummary summary = new ContentSummary(len, files, dirs, nsQuota, consumed, dsQuota);
  assertEquals("getLength", len, summary.getLength());
  assertEquals("getFileCount", files, summary.getFileCount());
  assertEquals("getDirectoryCount", dirs, summary.getDirectoryCount());
  assertEquals("getQuota", nsQuota, summary.getQuota());
  assertEquals("getSpaceConsumed", consumed, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", dsQuota, summary.getSpaceQuota());
}
InternalCallVerifier EqualityVerifier
/** A default-constructed ContentSummary reports zero for every field. */
@Test public void testConstructorEmpty() {
  ContentSummary empty = new ContentSummary();
  assertEquals("getLength", 0, empty.getLength());
  assertEquals("getFileCount", 0, empty.getFileCount());
  assertEquals("getDirectoryCount", 0, empty.getDirectoryCount());
  assertEquals("getQuota", 0, empty.getQuota());
  assertEquals("getSpaceConsumed", 0, empty.getSpaceConsumed());
  assertEquals("getSpaceQuota", 0, empty.getSpaceQuota());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * DF.getMount() for the current working directory must name an existing
 * directory that is a path prefix of the working directory.
 */
@Test(timeout=5000) public void testGetMountCurrentDirectory() throws Exception {
  final String workingDir = new File(".").getAbsoluteFile().getCanonicalPath();
  final DF df = new DF(new File(workingDir), 0L);
  final String mountPath = df.getMount();
  final File mountDir = new File(mountPath);
  assertTrue("Mount dir [" + mountDir.getAbsolutePath() + "] should exist.",
      mountDir.exists());
  assertTrue("Mount dir [" + mountDir.getAbsolutePath() + "] should be directory.",
      mountDir.isDirectory());
  assertTrue("Working dir [" + workingDir + "] should start with [" + mountPath + "].",
      workingDir.startsWith(mountPath));
}
InternalCallVerifier EqualityVerifier
/** XXDF reports the canned filesystem device (drive letter on Windows). */
@Test(timeout=5000) public void testFileSystem() throws Exception {
  XXDF df = new XXDF();
  final String expectedFileSystem;
  if (Shell.WINDOWS) {
    // On Windows the drive letter of the probed dir is reported.
    expectedFileSystem = df.getDirPath().substring(0, 2);
  } else {
    expectedFileSystem = "/dev/sda3";
  }
  assertEquals("Invalid filesystem", expectedFileSystem, df.getFilesystem());
}
InternalCallVerifier EqualityVerifier
/** XXDF reports the canned mount point (drive letter on Windows). */
@Test(timeout=5000) public void testMount() throws Exception {
  XXDF df = new XXDF();
  final String expectedMount;
  if (Shell.WINDOWS) {
    // On Windows the drive letter of the probed dir is reported.
    expectedMount = df.getDirPath().substring(0, 2);
  } else {
    expectedMount = "/foo/bar";
  }
  assertEquals("Invalid mount point", expectedMount, df.getMount());
}
InternalCallVerifier EqualityVerifier
/**
 * Once the only strong reference to a filesystem is dropped and collected,
 * the renewer must stop renewing its token and empty its queue.
 * Mangled generics ("Token&gt;") were reconstructed as Token&lt;?&gt;.
 */
@Test public void testStopRenewalWhenFsGone() throws IOException, InterruptedException {
  Configuration conf = mock(Configuration.class);
  Token<?> token = mock(Token.class);
  doReturn(new Text("myservice")).when(token).getService();
  // Each renewal reports an expiry one cycle in the future.
  doAnswer(new Answer<Long>() {
    public Long answer(InvocationOnMock invocation) {
      return Time.now() + RENEW_CYCLE;
    }
  }).when(token).renew(any(Configuration.class));
  RenewableFileSystem fs = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs).getConf();
  doReturn(token).when(fs).getRenewToken();
  renewer.addRenewAction(fs);
  assertEquals(1, renewer.getRenewQueueLength());
  Thread.sleep(RENEW_CYCLE);
  // Within one cycle: at least one renewal, at most two.
  verify(token, atLeast(1)).renew(eq(conf));
  verify(token, atMost(2)).renew(eq(conf));
  // Drop the only strong reference and coax the GC into collecting it.
  fs = null;
  System.gc();
  System.gc();
  System.gc();
  Thread.sleep(RENEW_CYCLE);
  // No renewals beyond those already observed, and the queue drains.
  verify(token, atLeast(1)).renew(eq(conf));
  verify(token, atMost(2)).renew(eq(conf));
  assertEquals(0, renewer.getRenewQueueLength());
}
InternalCallVerifier EqualityVerifier
@Test public void testGetNewTokenOnRenewFailure() throws IOException, InterruptedException {
Text service=new Text("myservice");
Configuration conf=mock(Configuration.class);
final Token> token1=mock(Token.class);
doReturn(service).when(token1).getService();
doThrow(new IOException("boom")).when(token1).renew(eq(conf));
final Token> token2=mock(Token.class);
doReturn(service).when(token2).getService();
doAnswer(new Answer(){
public Long answer( InvocationOnMock invocation){
return Time.now() + RENEW_CYCLE;
}
}
).when(token2).renew(eq(conf));
RenewableFileSystem fs=mock(RenewableFileSystem.class);
doReturn(conf).when(fs).getConf();
doReturn(token1).doReturn(token2).when(fs).getRenewToken();
doReturn(token2).when(fs).getDelegationToken(null);
doAnswer(new Answer[]>(){
public Token>[] answer( InvocationOnMock invocation){
return new Token>[]{token2};
}
}
).when(fs).addDelegationTokens(null,null);
renewer.addRenewAction(fs);
assertEquals(1,renewer.getRenewQueueLength());
Thread.sleep(RENEW_CYCLE);
verify(fs).getRenewToken();
verify(token1,atLeast(1)).renew(eq(conf));
verify(token1,atMost(2)).renew(eq(conf));
verify(fs).addDelegationTokens(null,null);
verify(fs).setDelegationToken(eq(token2));
assertEquals(1,renewer.getRenewQueueLength());
renewer.removeRenewAction(fs);
verify(token2).cancel(eq(conf));
assertEquals(0,renewer.getRenewQueueLength());
}
InternalCallVerifier EqualityVerifier
/**
 * A filesystem with a renewable token is queued on add, renewed
 * periodically, and removal cancels the token without fetching a new one.
 * Mangled generics ("Token&gt;") were reconstructed as Token&lt;?&gt;.
 */
@SuppressWarnings("unchecked") @Test public void testAddRemoveRenewAction() throws IOException, InterruptedException {
  Text service = new Text("myservice");
  Configuration conf = mock(Configuration.class);
  Token<?> token = mock(Token.class);
  doReturn(service).when(token).getService();
  // Each renewal pushes expiry one cycle into the future.
  doAnswer(new Answer<Long>() {
    public Long answer(InvocationOnMock invocation) {
      return Time.now() + RENEW_CYCLE;
    }
  }).when(token).renew(any(Configuration.class));
  RenewableFileSystem fs = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs).getConf();
  doReturn(token).when(fs).getRenewToken();
  renewer.addRenewAction(fs);
  assertEquals("FileSystem not added to DelegationTokenRenewer", 1,
      renewer.getRenewQueueLength());
  Thread.sleep(RENEW_CYCLE * 2);
  // Two cycles allow at least two renewals, at most three; never a cancel.
  verify(token, atLeast(2)).renew(eq(conf));
  verify(token, atMost(3)).renew(eq(conf));
  verify(token, never()).cancel(any(Configuration.class));
  renewer.removeRenewAction(fs);
  // Removal cancels the token but must not fetch or set a new one.
  verify(token).cancel(eq(conf));
  verify(fs, never()).getDelegationToken(null);
  verify(fs, never()).setDelegationToken(any(Token.class));
  assertEquals("FileSystem not removed from DelegationTokenRenewer", 0,
      renewer.getRenewQueueLength());
}
InternalCallVerifier EqualityVerifier
/**
 * Adding and removing two filesystems with distinct tokens must not
 * deadlock (guarded by the 4s timeout) and must cancel both tokens.
 * Mangled generics ("Token&gt;") were reconstructed, and an unused
 * FileSystem mock from the original was removed.
 */
@Test(timeout=4000) public void testMultipleTokensDoNotDeadlock() throws IOException, InterruptedException {
  Configuration conf = mock(Configuration.class);
  // Far enough in the future that no renewal fires during the test.
  long distantFuture = Time.now() + 3600 * 1000;
  Token<?> token1 = mock(Token.class);
  doReturn(new Text("myservice1")).when(token1).getService();
  doReturn(distantFuture).when(token1).renew(eq(conf));
  Token<?> token2 = mock(Token.class);
  doReturn(new Text("myservice2")).when(token2).getService();
  doReturn(distantFuture).when(token2).renew(eq(conf));
  RenewableFileSystem fs1 = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs1).getConf();
  doReturn(token1).when(fs1).getRenewToken();
  RenewableFileSystem fs2 = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs2).getConf();
  doReturn(token2).when(fs2).getRenewToken();
  renewer.addRenewAction(fs1);
  renewer.addRenewAction(fs2);
  assertEquals(2, renewer.getRenewQueueLength());
  renewer.removeRenewAction(fs1);
  assertEquals(1, renewer.getRenewQueueLength());
  renewer.removeRenewAction(fs2);
  assertEquals(0, renewer.getRenewQueueLength());
  verify(token1).cancel(eq(conf));
  verify(token2).cancel(eq(conf));
}
InternalCallVerifier EqualityVerifier
/** A filesystem without a renew token must not be queued for renewal. */
@Test public void testAddRenewActionWithNoToken() throws IOException, InterruptedException {
  Configuration config = mock(Configuration.class);
  RenewableFileSystem fileSystem = mock(RenewableFileSystem.class);
  doReturn(config).when(fileSystem).getConf();
  doReturn(null).when(fileSystem).getRenewToken();
  renewer.addRenewAction(fileSystem);
  // The token was consulted, but nothing was enqueued.
  verify(fileSystem).getRenewToken();
  assertEquals(0, renewer.getRenewQueueLength());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Zero-copy reads are truncated to one segment: both an 8192-byte and a
 * 4097-byte request return a 4096-byte buffer whose contents match a
 * regular read of the same file region.
 */
@Test public void testShortZeroCopyReads() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
FSDataInputStream fsIn=null;
final int TEST_FILE_LENGTH=12345;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Seed 7567L makes the file contents deterministic for later comparison.
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L);
try {
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
}
catch ( InterruptedException e) {
Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
}
catch ( TimeoutException e) {
Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
}
// First pass: read the whole file conventionally as the reference copy.
fsIn=fs.open(TEST_PATH);
byte original[]=new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
fsIn.close();
// Second pass: zero-copy reads. Requesting 8192 bytes yields only 4096.
fsIn=fs.open(TEST_PATH);
HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn;
ByteBuffer result=dfsIn.read(null,8192,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(4096,result.remaining());
// All bytes so far came through the zero-copy path.
Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result));
dfsIn.releaseBuffer(result);
// A 4097-byte request is likewise truncated to the next 4096 bytes.
result=dfsIn.read(null,4097,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(4096,result.remaining());
Assert.assertArrayEquals(Arrays.copyOfRange(original,4096,8192),byteBufferToArray(result));
dfsIn.releaseBuffer(result);
}
finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With client mmaps disabled, a zero-copy read must fail with
 * UnsupportedOperationException. With mmaps enabled but the mmap cache
 * sized to zero, zero-copy reads still work (and return null at EOF).
 */
@Test public void testClientMmapDisable() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
conf.setBoolean(DFS_CLIENT_MMAP_ENABLED,false);
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
final int TEST_FILE_LENGTH=16385;
final int RANDOM_SEED=23453;
final String CONTEXT="testClientMmapDisable";
FSDataInputStream fsIn=null;
DistributedFileSystem fs=null;
// A distinct client context isolates this test's short-circuit state.
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED);
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
fsIn=fs.open(TEST_PATH);
try {
fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.fail("expected zero-copy read to fail when client mmaps " + "were disabled.");
}
catch ( UnsupportedOperationException e) {
// expected: mmaps are disabled
}
}
finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
// Second phase: fresh cluster with mmaps enabled but a zero-sized cache.
fsIn=null;
fs=null;
cluster=null;
try {
conf.setBoolean(DFS_CLIENT_MMAP_ENABLED,true);
conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE,0);
// New context so the previous (mmap-disabled) client state is not reused.
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT + ".1");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED);
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
fsIn=fs.open(TEST_PATH);
// Zero-copy read succeeds even though no mmap can be cached.
ByteBuffer buf=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.releaseBuffer(buf);
// At end-of-file a zero-copy read returns null rather than throwing.
IOUtils.skipFully(fsIn,TEST_FILE_LENGTH - 1);
buf=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(null,buf);
}
finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Exercises the short-circuit mmap cache: mmaps are created lazily on the
 * first zero-copy read, pinned (not evictable) while buffers are
 * outstanding, and become evictable after the stream is closed.
 */
@Test public void testZeroCopyMmapCache() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
final int TEST_FILE_LENGTH=16385;
final int RANDOM_SEED=23453;
final String CONTEXT="testZeroCopyMmapCacheContext";
FSDataInputStream fsIn=null;
ByteBuffer results[]={null,null,null,null};
DistributedFileSystem fs=null;
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED);
try {
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
}
catch ( InterruptedException e) {
Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
}
catch ( TimeoutException e) {
Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
}
// Reference read of the full file (not compared here, but warms the path).
fsIn=fs.open(TEST_PATH);
byte original[]=new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
fsIn.close();
fsIn=fs.open(TEST_PATH);
final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache();
// Before any zero-copy read: no outstanding mmaps.
// NOTE(review): CountingVisitor argument order assumed to be
// (numOutstandingMmaps, ...) from the checks below — confirm against its ctor.
cache.accept(new CountingVisitor(0,5,5,0));
results[0]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.seek(0);
results[1]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
// The first block's replica must now be mmapped and pinned (no evictable time).
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
ShortCircuitReplica replica=replicas.get(new ExtendedBlockId(firstBlock.getBlockId(),firstBlock.getBlockPoolId()));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.hasMmap());
Assert.assertNull(replica.getEvictableTimeNs());
}
}
);
results[2]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
results[3]=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
cache.accept(new CountingVisitor(3,5,2,0));
// Release every buffer so the mmaps can become evictable.
for ( ByteBuffer buffer : results) {
if (buffer != null) {
fsIn.releaseBuffer(buffer);
}
}
fsIn.close();
// Closing the stream eventually drains the evictable-mmapped set; poll for it.
GenericTestUtils.waitFor(new Supplier(){
public Boolean get(){
final MutableBoolean finished=new MutableBoolean(false);
cache.accept(new CacheVisitor(){
@Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
finished.setValue(evictableMmapped.isEmpty());
}
}
);
return finished.booleanValue();
}
}
,10,60000);
// Final state: zero outstanding mmaps (-1 means "don't care" for the rest).
cache.accept(new CountingVisitor(0,-1,-1,-1));
fs.close();
cluster.shutdown();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test that we can zero-copy read cached data even without disabling
 * checksums: an uncached read without SKIP_CHECKSUMS fails, but once the
 * file is HDFS-cached the same read succeeds, and replica anchor status
 * tracks the cache directive and outstanding buffers.
 */
@Test(timeout=120000) public void testZeroCopyReadOfCachedData() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  BlockReaderTestUtil.enableBlockReaderFactoryTracing();
  BlockReaderTestUtil.enableHdfsCachingTracing();
  final int TEST_FILE_LENGTH = 16385;
  final Path TEST_PATH = new Path("/a");
  final int RANDOM_SEED = 23453;
  HdfsConfiguration conf = initZeroCopyTest();
  // Checksums stay ON — the point of the test.
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  final String CONTEXT = "testZeroCopyReadOfCachedData";
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
  // Let the datanode lock enough pages to cache the entire test file.
  conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH, 4096));
  MiniDFSCluster cluster = null;
  ByteBuffer result = null, result2 = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  // Fix: restored the wildcard type argument ("FsDatasetSpi>" did not compile).
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DistributedFileSystem fs = cluster.getFileSystem();
  DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
  DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
  byte original[] = DFSTestUtil.calculateFileContentsFromSeed(RANDOM_SEED, TEST_FILE_LENGTH);
  FSDataInputStream fsIn = fs.open(TEST_PATH);
  // Before caching, a zero-copy read that cannot skip checksums must fail.
  try {
    result = fsIn.read(null, TEST_FILE_LENGTH / 2, EnumSet.noneOf(ReadOption.class));
    Assert.fail("expected UnsupportedOperationException");
  } catch (UnsupportedOperationException e) {
    // expected
  }
  // Cache the file, then wait until the datanode reports it fully cached.
  fs.addCachePool(new CachePoolInfo("pool1"));
  long directiveId = fs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(TEST_PATH).setReplication((short)1).setPool("pool1").build());
  int numBlocks = (int)Math.ceil((double)TEST_FILE_LENGTH / BLOCK_SIZE);
  DFSTestUtil.verifyExpectedCacheUsage(DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH, BLOCK_SIZE), numBlocks, cluster.getDataNodes().get(0).getFSDataset());
  // Now the same read succeeds via the cached (checksum-free) path.
  try {
    result = fsIn.read(null, TEST_FILE_LENGTH, EnumSet.noneOf(ReadOption.class));
  } catch (UnsupportedOperationException e) {
    Assert.fail("expected to be able to read cached file via zero-copy");
  }
  Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE), byteBufferToArray(result));
  // A second, independent stream also reads the cached data zero-copy.
  FSDataInputStream fsIn2 = fs.open(TEST_PATH);
  try {
    result2 = fsIn2.read(null, TEST_FILE_LENGTH, EnumSet.noneOf(ReadOption.class));
  } catch (UnsupportedOperationException e) {
    Assert.fail("expected to be able to read cached file via zero-copy");
  }
  Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE), byteBufferToArray(result2));
  fsIn2.releaseBuffer(result2);
  fsIn2.close();
  // Anchor status: (cached, buffer outstanding) -> remove directive ->
  // (uncached, buffer outstanding) -> release buffer -> fully unanchored.
  final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
  final ShortCircuitCache cache = ClientContext.get(CONTEXT, new DFSClient.Conf(conf)).getShortCircuitCache();
  waitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
  fs.removeCacheDirective(directiveId);
  waitForReplicaAnchorStatus(cache, firstBlock, false, true, 1);
  fsIn.releaseBuffer(result);
  waitForReplicaAnchorStatus(cache, firstBlock, false, false, 1);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  fsIn.close();
  fs.close();
  cluster.shutdown();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Basic zero-copy read: a 4096-byte read with SKIP_CHECKSUMS returns a
 * full 4096-byte buffer whose contents match a regular read, and the read
 * statistics attribute all bytes to the zero-copy path.
 */
@Test public void testZeroCopyReads() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
FSDataInputStream fsIn=null;
final int TEST_FILE_LENGTH=12345;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Seed 7567L makes the file contents deterministic for later comparison.
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L);
try {
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
}
catch ( InterruptedException e) {
Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
}
catch ( TimeoutException e) {
Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
}
// Reference copy of the file contents via a conventional read.
fsIn=fs.open(TEST_PATH);
byte original[]=new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
fsIn.close();
fsIn=fs.open(TEST_PATH);
ByteBuffer result=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(4096,result.remaining());
HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn;
// Every byte read so far went through the zero-copy path.
Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result));
fsIn.releaseBuffer(result);
}
finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Zero-copy reads cannot map past the 2 GB (Integer.MAX_VALUE) mark within
 * a single block: a read straddling the boundary is truncated to it and a
 * follow-up read throws. With a smaller block size, reads continue past
 * 2 GB because each block's mapping starts fresh. Only runs when large-file
 * tests are enabled (Assume).
 */
@Test public void test2GBMmapLimit() throws Exception {
Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
HdfsConfiguration conf=initZeroCopyTest();
final long TEST_FILE_LENGTH=2469605888L;
// NULL checksums so the multi-GB file is cheap to create and read.
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,"NULL");
// First file: one single block spanning the whole >2GB length.
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,TEST_FILE_LENGTH);
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
final String CONTEXT="test2GBMmapLimit";
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
FSDataInputStream fsIn=null, fsIn2=null;
ByteBuffer buf1=null, buf2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,0xB);
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
fsIn=fs.open(TEST_PATH);
buf1=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(1,buf1.remaining());
fsIn.releaseBuffer(buf1);
buf1=null;
// Seek to 7 bytes short of Integer.MAX_VALUE; a 1024-byte request is
// truncated to the 7 bytes remaining below the 2GB mapping limit.
fsIn.seek(2147483640L);
buf1=fsIn.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(7,buf1.remaining());
Assert.assertEquals(Integer.MAX_VALUE,buf1.limit());
fsIn.releaseBuffer(buf1);
buf1=null;
Assert.assertEquals(2147483647L,fsIn.getPos());
// At exactly the limit, a zero-copy read within the same block fails.
try {
buf1=fsIn.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.fail("expected UnsupportedOperationException");
}
catch ( UnsupportedOperationException e) {
// expected: cannot mmap past 2GB within one block
}
fsIn.close();
fsIn=null;
// Second file: 256MB blocks, so reads can cross 2GB block by block.
final Path TEST_PATH2=new Path("/b");
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,268435456L);
DFSTestUtil.createFile(fs,TEST_PATH2,1024 * 1024,TEST_FILE_LENGTH,268435456L,(short)1,0xA);
fsIn2=fs.open(TEST_PATH2);
fsIn2.seek(2147483640L);
buf2=fsIn2.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
// Truncated to the 8 bytes left in the current block, then the next
// read continues normally in the following block.
Assert.assertEquals(8,buf2.remaining());
Assert.assertEquals(2147483648L,fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2=null;
buf2=fsIn2.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(1024,buf2.remaining());
Assert.assertEquals(2147484672L,fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2=null;
}
finally {
if (buf1 != null) {
fsIn.releaseBuffer(buf1);
}
if (buf2 != null) {
fsIn2.releaseBuffer(buf2);
}
IOUtils.cleanup(null,fsIn,fsIn2);
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Without SKIP_CHECKSUMS (no fallback allowed), an over-sized zero-copy
 * read throws UnsupportedOperationException; with SKIP_CHECKSUMS a
 * 4096-byte read succeeds and matches a regular read.
 */
@Test public void testZeroCopyReadsNoFallback() throws Exception {
HdfsConfiguration conf=initZeroCopyTest();
MiniDFSCluster cluster=null;
final Path TEST_PATH=new Path("/a");
FSDataInputStream fsIn=null;
final int TEST_FILE_LENGTH=12345;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Seed 7567L makes the file contents deterministic for later comparison.
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L);
try {
DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
}
catch ( InterruptedException e) {
Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
}
catch ( TimeoutException e) {
Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
}
// Reference copy of the file contents via a conventional read.
fsIn=fs.open(TEST_PATH);
byte original[]=new byte[TEST_FILE_LENGTH];
IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
fsIn.close();
fsIn=fs.open(TEST_PATH);
HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn;
ByteBuffer result;
// 4097 bytes with no read options: no fallback is permitted, so it throws.
try {
result=dfsIn.read(null,4097,EnumSet.noneOf(ReadOption.class));
Assert.fail("expected UnsupportedOperationException");
}
catch ( UnsupportedOperationException e) {
// expected
}
// The same stream still serves a 4096-byte zero-copy read with checksums skipped.
result=dfsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(4096,result.remaining());
Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead());
Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result));
}
finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Remove the target directory after the getListing RPC: the in-flight
 * iterator yields its first entry, then reports exhaustion.
 */
@Test public void testTargetDeletionForListLocatedStatus() throws Exception {
  LOG.info("Test Target Delete For listLocatedStatus");
  RemoteIterator statusIterator = fs.listLocatedStatus(TEST_PATH);
  statusIterator.next();
  assertFalse(statusIterator.hasNext());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename test where both src and dst are files: OVERWRITE rename deletes
 * the destination's old blocks, and the result survives a cluster restart.
 */
@Test public void testDeletionOfDstFile() throws Exception {
Path src=getTestPath("testDeletionOfDstFile/dir/src");
Path dst=getTestPath("testDeletionOfDstFile/newdir/dst");
createFile(src);
createFile(dst);
final FSNamesystem namesystem=cluster.getNamesystem();
final long blocks=namesystem.getBlocksTotal();
final long fileCount=namesystem.getFilesTotal();
rename(src,dst,false,false,true,Rename.OVERWRITE);
// Overwriting dst removes one file and its block from the namesystem.
Assert.assertEquals(blocks - 1,namesystem.getBlocksTotal());
Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal());
// Restart (without formatting) to verify the rename was persisted.
restartCluster(false);
int count=0;
boolean exception=true;
src=getTestPath("testDeletionOfDstFile/dir/src");
dst=getTestPath("testDeletionOfDstFile/newdir/dst");
// Retry up to 5 times: the namenode may still be coming out of safe mode.
while (exception && count < 5) {
try {
exists(fc,src);
exception=false;
}
catch ( Exception e) {
LOG.warn("Exception " + " count " + count + " "+ e.getMessage());
Thread.sleep(1000);
count++;
}
}
// After restart: src is gone, dst holds the renamed file.
Assert.assertFalse(exists(fc,src));
Assert.assertTrue(exists(fc,dst));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename test where both src and dst are directories: OVERWRITE rename
 * removes the old dst directory, and the result survives a cluster restart.
 */
@Test public void testDeletionOfDstDirectory() throws Exception {
Path src=getTestPath("testDeletionOfDstDirectory/dir/src");
Path dst=getTestPath("testDeletionOfDstDirectory/newdir/dst");
fc.mkdir(src,FileContext.DEFAULT_PERM,true);
fc.mkdir(dst,FileContext.DEFAULT_PERM,true);
FSNamesystem namesystem=cluster.getNamesystem();
long fileCount=namesystem.getFilesTotal();
rename(src,dst,false,false,true,Rename.OVERWRITE);
// Overwriting dst removes one directory entry from the namesystem.
Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal());
// Restart (without formatting) to verify the rename was persisted.
restartCluster(false);
src=getTestPath("testDeletionOfDstDirectory/dir/src");
dst=getTestPath("testDeletionOfDstDirectory/newdir/dst");
int count=0;
boolean exception=true;
// Retry up to 5 times: the namenode may still be coming out of safe mode.
while (exception && count < 5) {
try {
exists(fc,src);
exception=false;
}
catch ( Exception e) {
LOG.warn("Exception " + " count " + count + " "+ e.getMessage());
Thread.sleep(1000);
count++;
}
}
// After restart: src is gone, dst is the surviving directory.
Assert.assertFalse(exists(fc,src));
Assert.assertTrue(exists(fc,dst));
}
InternalCallVerifier BooleanVerifier
/**
 * deleteOnExit registers paths with the FileContext finalizer shutdown
 * hook; running the finalizer deletes every registered path and empties
 * the registration data.
 */
@Test public void testDeleteOnExit() throws Exception {
Path file1=helper.getTestRootPath(fc,"file1");
createFile(fc,file1,numBlocks,blockSize);
fc.deleteOnExit(file1);
// One FileContext entry tracking one path; the shutdown hook is installed.
checkDeleteOnExitData(1,fc,file1);
Assert.assertTrue(ShutdownHookManager.get().hasShutdownHook(FileContext.FINALIZER));
Path file2=helper.getTestRootPath(fc,"dir1/file2");
createFile(fc,file2,numBlocks,blockSize);
fc.deleteOnExit(file2);
checkDeleteOnExitData(1,fc,file1,file2);
Path dir=helper.getTestRootPath(fc,"dir3/dir4/dir5/dir6");
createFile(fc,dir,numBlocks,blockSize);
fc.deleteOnExit(dir);
checkDeleteOnExitData(1,fc,file1,file2,dir);
// Simulate JVM shutdown: the finalizer removes all registered paths.
FileContext.FINALIZER.run();
checkDeleteOnExitData(0,fc,new Path[0]);
Assert.assertFalse(exists(fc,file1));
Assert.assertFalse(exists(fc,file2));
Assert.assertFalse(exists(fc,dir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Resolving a symlink through FileContext must report exactly one
 * underlying AbstractFileSystem.
 */
@Test(timeout=30000) public void testFileContextResolveAfs() throws IOException {
  Configuration config = new Configuration();
  localFs = FileSystem.get(config);
  Path target = new Path(TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs1");
  Path link = localFs.makeQualified(new Path(TEST_ROOT_DIR_LOCAL, "TestFileContextResolveAfs2"));
  localFs.mkdirs(new Path(TEST_ROOT_DIR_LOCAL));
  localFs.create(target);
  fc.createSymlink(target, link, true);
  Set resolved = fc.resolveAbstractFileSystems(link);
  Assert.assertEquals(1, resolved.size());
  // Clean up the scratch files when the JVM exits.
  localFs.deleteOnExit(target);
  localFs.deleteOnExit(link);
  localFs.close();
}
InternalCallVerifier BooleanVerifier
/**
 * Check that FileStatus are not equal if their paths are not equal.
 */
@Test public void testNotEquals(){
  FsPermission rw = FsPermission.valueOf("-rw-rw-rw-");
  FileStatus statusA = new FileStatus(1, true, 1, 1, 1, 1, rw, "one", "one", null, new Path("path1"));
  FileStatus statusB = new FileStatus(1, true, 1, 1, 1, 1, rw, "one", "one", null, new Path("path2"));
  // Inequality must hold in both directions (symmetry of equals).
  assertFalse(statusA.equals(statusB));
  assertFalse(statusB.equals(statusA));
}
InternalCallVerifier EqualityVerifier
/**
 * Check that the write and readField methods work correctly:
 * FileStatus objects round-tripped through a byte stream compare equal
 * to the originals.
 */
@Test public void testFileStatusWritable() throws Exception {
  FileStatus[] statuses = {
    new FileStatus(1, false, 5, 3, 4, 5, null, "", "", new Path("/a/b")),
    new FileStatus(0, false, 1, 2, 3, new Path("/")),
    new FileStatus(1, false, 5, 3, 4, 5, null, "", "", new Path("/a/b"))
  };
  LOG.info("Writing FileStatuses to a ByteArrayOutputStream");
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  DataOutput sink = new DataOutputStream(buffer);
  for (FileStatus status : statuses) {
    status.write(sink);
  }
  LOG.info("Creating ByteArrayInputStream object");
  DataInput source = new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
  LOG.info("Testing if read objects are equal to written ones");
  // Deserialize into one reusable instance and compare per position.
  FileStatus roundTripped = new FileStatus();
  for (int i = 0; i < statuses.length; i++) {
    roundTripped.readFields(source);
    assertEquals("Different FileStatuses in iteration " + i, roundTripped, statuses[i]);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * URIs that denote the default filesystem in any spelling (scheme only,
 * empty authority, explicit default authority, bare path) must resolve to
 * the cached default instance; a different authority yields a different
 * instance, and an authority with no scheme is rejected.
 */
@Test public void testDefaultFsUris() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
  final URI defaultUri = URI.create("defaultfs://host");
  FileSystem.setDefaultUri(conf, defaultUri);
  final FileSystem defaultFs = FileSystem.get(conf);
  assertEquals(defaultUri, defaultFs.getUri());
  // Every spelling of the default URI must hit the same cached instance.
  FileSystem resolved = FileSystem.get(URI.create("defaultfs:/"), conf);
  assertSame(defaultFs, resolved);
  resolved = FileSystem.get(URI.create("defaultfs:///"), conf);
  assertSame(defaultFs, resolved);
  resolved = FileSystem.get(URI.create("defaultfs://host"), conf);
  assertSame(defaultFs, resolved);
  // A different authority is a different filesystem.
  resolved = FileSystem.get(URI.create("defaultfs://host2"), conf);
  assertNotSame(defaultFs, resolved);
  // A bare path falls back to the default filesystem.
  resolved = FileSystem.get(URI.create("/"), conf);
  assertSame(defaultFs, resolved);
  // An authority without a scheme cannot be resolved at all.
  for (String authorityOnly : new String[]{"//host", "//host2"}) {
    try {
      resolved = FileSystem.get(URI.create(authorityOnly), conf);
      fail("got fs with auth but no scheme");
    } catch (Exception e) {
      assertEquals("No FileSystem for scheme: null", e.getMessage());
    }
  }
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * The same URI fetched on behalf of two different users must yield two
 * distinct cached filesystem instances.
 */
@Test public void testUserFS() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  FileSystem barFs = FileSystem.get(new URI("cachedfile://a"), conf, "bar");
  FileSystem fooFs = FileSystem.get(new URI("cachedfile://a"), conf, "foo");
  assertNotSame(barFs, fooFs);
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * The filesystem cache is keyed by the UGI instance (subject identity),
 * not by user name or credentials: the same UGI always gets the cached fs,
 * a different UGI for the same user gets a new one, and mutating a UGI's
 * tokens does not change which cached fs it maps to.
 */
@SuppressWarnings("unchecked") @Test public void testCacheForUgi() throws Exception {
final Configuration conf=new Configuration();
conf.set("fs.cachedfile.impl",FileSystem.getFileSystemClass("file",null).getName());
UserGroupInformation ugiA=UserGroupInformation.createRemoteUser("foo");
UserGroupInformation ugiB=UserGroupInformation.createRemoteUser("bar");
FileSystem fsA=ugiA.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
// Same UGI, same URI: the cache returns the identical instance.
FileSystem fsA1=ugiA.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
assertSame(fsA,fsA1);
// A different UGI gets a different instance.
FileSystem fsB=ugiB.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
assertNotSame(fsA,fsB);
Token t1=mock(Token.class);
// A *new* UGI for the same user name ("foo") is a different cache key.
UserGroupInformation ugiA2=UserGroupInformation.createRemoteUser("foo");
fsA=ugiA2.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
assertNotSame(fsA,fsA1);
// Adding a token to the original UGI does not alter its cache key:
// it still maps to the originally cached instance.
ugiA.addToken(t1);
fsA=ugiA.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
assertSame(fsA,fsA1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * get() serves the cached singleton, while newInstance() mints a fresh
 * (and non-equal) filesystem on every call.
 */
@Test public void testFsUniqueness() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  // Two get() calls share one cached instance.
  FileSystem cachedA = FileSystem.get(conf);
  FileSystem cachedB = FileSystem.get(conf);
  assertTrue(cachedA == cachedB);
  // Two newInstance() calls must be distinct and unequal.
  FileSystem freshA = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  FileSystem freshB = FileSystem.newInstance(new URI("cachedfile://a"), conf, "bar");
  assertTrue(freshA != freshB && !freshA.equals(freshB));
  freshA.close();
  freshB.close();
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * closeAllForUGI evicts a UGI's cached filesystems: before the call the
 * same UGI gets the identical cached instance, after the call it gets a
 * newly created one.
 */
@Test public void testCloseAllForUGI() throws Exception {
final Configuration conf=new Configuration();
conf.set("fs.cachedfile.impl",FileSystem.getFileSystemClass("file",null).getName());
UserGroupInformation ugiA=UserGroupInformation.createRemoteUser("foo");
FileSystem fsA=ugiA.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
FileSystem fsA1=ugiA.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
// Cache hit: both fetches for the same UGI are the same object.
assertSame(fsA,fsA1);
// Evict everything cached for this UGI.
FileSystem.closeAllForUGI(ugiA);
fsA1=ugiA.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return FileSystem.get(new URI("cachedfile://a"),conf);
}
}
);
// The cache was purged, so a fresh instance is created.
assertNotSame(fsA,fsA1);
}
InternalCallVerifier BooleanVerifier
/**
 * Cancelling deleteOnExit removes the registration: after cancellation,
 * closing the filesystem must neither stat nor delete the path.
 */
@Test public void testCancelDeleteOnExit() throws IOException {
  FileSystem inner = mock(FileSystem.class);
  FileSystem outer = new FilterFileSystem(inner);
  Path target = new Path("/a");
  when(inner.getFileStatus(eq(target))).thenReturn(new FileStatus());
  // Registering the path checks its existence via getFileStatus.
  assertTrue(outer.deleteOnExit(target));
  verify(inner).getFileStatus(eq(target));
  // First cancel succeeds; the second finds nothing left to remove.
  assertTrue(outer.cancelDeleteOnExit(target));
  assertFalse(outer.cancelDeleteOnExit(target));
  reset(inner);
  outer.close();
  verify(inner, never()).getFileStatus(any(Path.class));
  verify(inner, never()).delete(any(Path.class), anyBoolean());
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * With caching disabled for a scheme, every get() for the same URI must
 * construct a brand-new filesystem instance.
 */
@Test public void testCacheDisabled() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.uncachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  conf.setBoolean("fs.uncachedfile.impl.disable.cache", true);
  FileSystem first = FileSystem.get(new URI("uncachedfile://a"), conf);
  FileSystem second = FileSystem.get(new URI("uncachedfile://a"), conf);
  assertNotSame(first, second);
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
/**
 * With caching enabled (the default), repeated get() calls for the same
 * URI must return the identical cached instance.
 */
@Test public void testCacheEnabled() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  FileSystem first = FileSystem.get(new URI("cachedfile://a"), conf);
  FileSystem second = FileSystem.get(new URI("cachedfile://a"), conf);
  assertSame(first, second);
}
InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When credentials already hold a token for one child fs, only the other
 * token-bearing child fetches a new token; the pre-existing token is kept.
 */
@Test public void testFsWithChildTokensOneExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service1 = new Text("singleTokenFs1");
  Text service2 = new Text("singleTokenFs2");
  // Fix: restored the wildcard type argument ("Token>" did not compile).
  Token<?> token = mock(Token.class);
  // Pre-seed a token for service2 so fs2 must NOT fetch a fresh one.
  credentials.addToken(service2, token);
  MockFileSystem fs1 = createFileSystemForServiceName(service1);
  MockFileSystem fs2 = createFileSystemForServiceName(service2);
  MockFileSystem fs3 = createFileSystemForServiceName(null);
  MockFileSystem multiFs = createFileSystemForServiceName(null, fs1, fs2, fs3);
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs1, true);   // only fs1 actually fetches
  verifyTokenFetch(fs2, false);  // token already present
  verifyTokenFetch(fs3, false);  // no service, nothing to fetch
  assertEquals(2, credentials.numberOfTokens());
  assertNotNull(credentials.getToken(service1));
  assertSame(token, credentials.getToken(service2));
}
InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When the parent fs's own token already exists in the credentials, only
 * the children fetch new tokens; the parent's token is kept as-is.
 */
@Test public void testFsWithMyOwnExistsAndChildTokens() throws Exception {
  Credentials credentials = new Credentials();
  Text service1 = new Text("singleTokenFs1");
  Text service2 = new Text("singleTokenFs2");
  Text myService = new Text("multiTokenFs");
  // Fix: restored the wildcard type argument ("Token>" did not compile).
  Token<?> token = mock(Token.class);
  // Pre-seed the parent's own token so it must not re-fetch.
  credentials.addToken(myService, token);
  MockFileSystem fs1 = createFileSystemForServiceName(service1);
  MockFileSystem fs2 = createFileSystemForServiceName(service2);
  MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);  // parent token already present
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, true);
  assertEquals(3, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(myService));
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A single token-bearing fs fetches exactly one token into empty credentials.
 */
@Test public void testFsWithToken() throws Exception {
  Text service = new Text("singleTokenFs");
  MockFileSystem tokenFs = createFileSystemForServiceName(service);
  Credentials creds = new Credentials();
  tokenFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(tokenFs, true);
  assertEquals(1, creds.numberOfTokens());
  assertNotNull(creds.getToken(service));
}
InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * If the credentials already hold a token for the fs's service, no new
 * token is fetched and the existing token object is preserved.
 */
@Test public void testFsWithTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs");
  MockFileSystem fs = createFileSystemForServiceName(service);
  // Fix: restored the wildcard type argument ("Token>" did not compile).
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);
  fs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(fs, false);  // already present, no fetch
  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The same child appearing twice (once behind a filter) must be fetched
 * from only once, yielding a single token.
 */
@Test public void testFsWithDuplicateChildren() throws Exception {
  Credentials creds = new Credentials();
  Text service = new Text("singleTokenFs1");
  MockFileSystem child = createFileSystemForServiceName(service);
  MockFileSystem parent = createFileSystemForServiceName(null, child, new FilterFileSystem(child));
  parent.addDelegationTokens(renewer, creds);
  verifyTokenFetch(parent, false);
  verifyTokenFetch(child, true);
  assertEquals(1, creds.numberOfTokens());
  assertNotNull(creds.getToken(service));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A tokenless parent collects tokens from each token-bearing child; a
 * serviceless child contributes nothing.
 */
@Test public void testFsWithChildTokens() throws Exception {
  Credentials creds = new Credentials();
  Text serviceOne = new Text("singleTokenFs1");
  Text serviceTwo = new Text("singleTokenFs2");
  MockFileSystem childOne = createFileSystemForServiceName(serviceOne);
  MockFileSystem childTwo = createFileSystemForServiceName(serviceTwo);
  MockFileSystem serviceless = createFileSystemForServiceName(null);
  MockFileSystem parent = createFileSystemForServiceName(null, childOne, childTwo, serviceless);
  parent.addDelegationTokens(renewer, creds);
  verifyTokenFetch(parent, false);
  verifyTokenFetch(childOne, true);
  verifyTokenFetch(childTwo, true);
  verifyTokenFetch(serviceless, false);
  assertEquals(2, creds.numberOfTokens());
  assertNotNull(creds.getToken(serviceOne));
  assertNotNull(creds.getToken(serviceTwo));
}
InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * A duplicated child whose token already exists in the credentials causes
 * no fetch at all; the pre-existing token is preserved.
 */
@Test public void testFsWithDuplicateChildrenTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs1");
  // Fix: restored the wildcard type argument ("Token>" did not compile).
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);
  MockFileSystem fs = createFileSystemForServiceName(service);
  MockFileSystem multiFs = createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs, false);  // token pre-seeded, so no fetch
  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testFsWithNestedDuplicatesChildren() throws Exception {
  // Deeply nested mix of duplicate, wrapped, tokenless and pre-credentialed
  // children: each distinct service is fetched at most once.
  Credentials credentials=new Credentials();
  Text service1=new Text("singleTokenFs1");
  Text service2=new Text("singleTokenFs2");
  Text service4=new Text("singleTokenFs4");
  Text multiService=new Text("multiTokenFs");
  // Fixed invalid "Token>" declaration (garbled generics) to a wildcard type.
  Token<?> token2=mock(Token.class);
  credentials.addToken(service2,token2);
  MockFileSystem fs1=createFileSystemForServiceName(service1);
  MockFileSystem fs1B=createFileSystemForServiceName(service1);
  MockFileSystem fs2=createFileSystemForServiceName(service2);
  MockFileSystem fs3=createFileSystemForServiceName(null);
  MockFileSystem fs4=createFileSystemForServiceName(service4);
  MockFileSystem multiFs=createFileSystemForServiceName(multiService,fs1,fs1B,fs2,fs2,new FilterFileSystem(fs3),new FilterFileSystem(new FilterFileSystem(fs4)));
  MockFileSystem superMultiFs=createFileSystemForServiceName(null,fs1,fs1B,fs1,new FilterFileSystem(fs3),new FilterFileSystem(multiFs));
  superMultiFs.addDelegationTokens(renewer,credentials);
  verifyTokenFetch(superMultiFs,false);  // no service of its own
  verifyTokenFetch(multiFs,true);        // has its own "multiTokenFs" service
  verifyTokenFetch(fs1,true);            // fetched once despite fs1/fs1B duplicates
  verifyTokenFetch(fs2,false);           // token already present in credentials
  verifyTokenFetch(fs3,false);           // no service
  verifyTokenFetch(fs4,true);
  assertEquals(4,credentials.numberOfTokens());
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
  assertSame(token2,credentials.getToken(service2));
  assertNotNull(credentials.getToken(multiService));
  assertNotNull(credentials.getToken(service4));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testFsWithMyOwnAndChildTokens() throws Exception {
  // The parent has its own service AND children: parent and fs1 are fetched,
  // fs2 is skipped because its token is already in the credentials.
  Credentials credentials=new Credentials();
  Text service1=new Text("singleTokenFs1");
  Text service2=new Text("singleTokenFs2");
  Text myService=new Text("multiTokenFs");
  // Fixed invalid "Token>" declaration (garbled generics) to a wildcard type.
  Token<?> token=mock(Token.class);
  credentials.addToken(service2,token);
  MockFileSystem fs1=createFileSystemForServiceName(service1);
  MockFileSystem fs2=createFileSystemForServiceName(service2);
  MockFileSystem multiFs=createFileSystemForServiceName(myService,fs1,fs2);
  multiFs.addDelegationTokens(renewer,credentials);
  verifyTokenFetch(multiFs,true);
  verifyTokenFetch(fs1,true);
  verifyTokenFetch(fs2,false);
  assertEquals(3,credentials.numberOfTokens());
  assertNotNull(credentials.getToken(myService));
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRepresentsDir() throws Exception {
// Verifies -put's handling of destinations that syntactically denote a
// directory ("/", "/." or "/foo/.." suffixes) both when the destination
// directory exists and when it does not.
Path subdirDstPath=new Path(dstPath,srcPath.getName());
String argv[]=null;
// Plain -put to a non-existent destination creates it as a file.
lfs.delete(dstPath,true);
assertFalse(lfs.exists(dstPath));
argv=new String[]{"-put",srcPath.toString(),dstPath.toString()};
assertEquals(0,shell.run(argv));
assertTrue(lfs.exists(dstPath) && lfs.isFile(dstPath));
lfs.delete(dstPath,true);
assertFalse(lfs.exists(dstPath));
// NOTE(review): second delete of an already-absent path — presumably just
// defensive; it is a no-op here.
lfs.delete(dstPath,true);
// A directory-style suffix on a missing destination must fail (exit 1)
// and must not create anything.
for ( String suffix : new String[]{"/","/."}) {
argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix};
assertEquals(1,shell.run(argv));
assertFalse(lfs.exists(dstPath));
assertFalse(lfs.exists(subdirDstPath));
}
// With the destination directory present, the same suffixes succeed and
// the source lands inside it under its own name.
for ( String suffix : new String[]{"/","/."}) {
lfs.delete(dstPath,true);
lfs.mkdirs(dstPath);
argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix};
assertEquals(0,shell.run(argv));
assertTrue(lfs.exists(subdirDstPath));
assertTrue(lfs.isFile(subdirDstPath));
}
// A "/foo/.." path must resolve back to dstPath before the copy.
String dotdotDst=dstPath + "/foo/..";
lfs.delete(dstPath,true);
lfs.mkdirs(new Path(dstPath,"foo"));
argv=new String[]{"-put",srcPath.toString(),dotdotDst};
assertEquals(0,shell.run(argv));
assertTrue(lfs.exists(subdirDstPath));
assertTrue(lfs.isFile(subdirDstPath));
}
InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
@Test public void testMoveFromWindowsLocalPath() throws Exception {
  // Only meaningful on Windows: -moveFromLocal must accept a Windows-style
  // absolute local path (drive letter, backslashes) as the source.
  assumeTrue(Path.WINDOWS);
  Path testRoot=new Path(testRootDir,"testPutFile");
  lfs.delete(testRoot,true);
  lfs.mkdirs(testRoot);
  Path target=new Path(testRoot,"target");
  Path srcFile=new Path(testRoot,new Path("srcFile"));
  lfs.createNewFile(srcFile);
  // URI.getPath() already returns a String; the redundant toString() is gone.
  String winSrcFile=new File(srcFile.toUri().getPath()).getAbsolutePath();
  shellRun(0,"-moveFromLocal",winSrcFile,target.toString());
  // A move removes the source and creates the target file.
  assertFalse(lfs.exists(srcFile));
  assertTrue(lfs.exists(target));
  assertTrue(lfs.isFile(target));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCopyMerge() throws Exception {
  // Each file f1..f3 and dir/df1..df3 contains its own name as content, so
  // the merged output spells out the merge order.
  Path root=new Path(testRootDir,"TestMerge");
  Path f1=new Path(root,"f1");
  Path f2=new Path(root,"f2");
  Path f3=new Path(root,"f3");
  Path fnf=new Path(root,"fnf");
  Path d=new Path(root,"dir");
  Path df1=new Path(d,"df1");
  Path df2=new Path(d,"df2");
  Path df3=new Path(d,"df3");
  createFile(f1,f2,f3,df1,df2,df3);
  int exit;
  // Single source file.
  exit=shell.run(new String[]{"-getmerge",f1.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f1",readFile("out"));
  // Missing source: non-zero exit, and no output file is created.
  exit=shell.run(new String[]{"-getmerge",fnf.toString(),"out"});
  assertEquals(1,exit);
  assertFalse(lfs.exists(new Path("out")));
  // Multiple sources are concatenated in argument order.
  exit=shell.run(new String[]{"-getmerge",f1.toString(),f2.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f1f2",readFile("out"));
  exit=shell.run(new String[]{"-getmerge",f2.toString(),f1.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f2f1",readFile("out"));
  // -nl appends a newline after each merged file.
  exit=shell.run(new String[]{"-getmerge","-nl",f1.toString(),f2.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f1\nf2\n",readFile("out"));
  // BUG FIX: the four runs below previously discarded shell.run's result, so
  // each assertEquals(0,exit) re-checked the stale exit code from the call
  // above. Capture the result so the exit-code assertions are meaningful.
  exit=shell.run(new String[]{"-getmerge","-nl",new Path(root,"f*").toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f1\nf2\nf3\n",readFile("out"));
  exit=shell.run(new String[]{"-getmerge","-nl",root.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f1\nf2\nf3\n",readFile("out"));
  exit=shell.run(new String[]{"-getmerge","-nl",d.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("df1\ndf2\ndf3\n",readFile("out"));
  exit=shell.run(new String[]{"-getmerge","-nl",f1.toString(),d.toString(),f2.toString(),"out"});
  assertEquals(0,exit);
  assertEquals("f1\ndf1\ndf2\ndf3\nf2\n",readFile("out"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMoveDirFromLocal() throws Exception {
  // -moveFromLocal on a directory: the source vanishes, the target appears.
  final Path root=new Path(testRootDir,"testPutDir");
  lfs.delete(root,true);
  lfs.mkdirs(root);
  final Path source=new Path(root,"srcDir");
  lfs.mkdirs(source);
  final Path destination=new Path(root,"target");
  final int rc=shell.run(new String[]{"-moveFromLocal",source.toString(),destination.toString()});
  assertEquals(0,rc);
  assertFalse(lfs.exists(source));
  assertTrue(lfs.exists(destination));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMoveFileFromLocal() throws Exception {
  // -moveFromLocal on a single file: source removed, target created as a file.
  final Path root=new Path(testRootDir,"testPutFile");
  lfs.delete(root,true);
  lfs.mkdirs(root);
  final Path destination=new Path(root,"target");
  final Path source=new Path(root,new Path("srcFile"));
  lfs.createNewFile(source);
  final int rc=shell.run(new String[]{"-moveFromLocal",source.toString(),destination.toString()});
  assertEquals(0,rc);
  assertFalse(lfs.exists(source));
  assertTrue(lfs.exists(destination));
  assertTrue(lfs.isFile(destination));
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
@Before public void prepFiles() throws Exception {
  // Fresh state for every test: checksums enabled, previous src/dst removed,
  // and a tiny source file (plus its checksum side-file) created.
  lfs.setVerifyChecksum(true);
  lfs.setWriteChecksum(true);
  lfs.delete(srcPath,true);
  lfs.delete(dstPath,true);
  FSDataOutputStream out=lfs.create(srcPath);
  try {
    out.writeChars("hi");
  }
  finally {
    // FIX: close in finally so the stream is not leaked if the write throws.
    out.close();
  }
  assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMoveDirFromLocalDestExists() throws Exception {
  final Path root=new Path(testRootDir,"testPutDir");
  lfs.delete(root,true);
  lfs.mkdirs(root);
  final Path source=new Path(root,"srcDir");
  lfs.mkdirs(source);
  final Path destination=new Path(root,"target");
  lfs.mkdirs(destination);
  // Destination dir exists: the source is moved INTO it under its own name.
  int rc=shell.run(new String[]{"-moveFromLocal",source.toString(),destination.toString()});
  assertEquals(0,rc);
  assertFalse(lfs.exists(source));
  assertTrue(lfs.exists(new Path(destination,source.getName())));
  // Moving again collides with the previously moved dir: the shell fails
  // and the (recreated) source is left in place.
  lfs.mkdirs(source);
  rc=shell.run(new String[]{"-moveFromLocal",source.toString(),destination.toString()});
  assertEquals(1,rc);
  assertTrue(lfs.exists(source));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testRmForceWithNonexistentGlob() throws Exception {
  Configuration conf=new Configuration();
  FsShell shell=new FsShell();
  shell.setConf(conf);
  // Capture stderr so we can assert the command stays completely silent.
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream errStream=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  System.setErr(errStream);
  try {
    // -rm -f must succeed quietly when the glob matches nothing.
    final int rc=shell.run(new String[]{"-rm","-f","nomatch*"});
    assertEquals(0,rc);
    assertTrue(captured.toString().isEmpty());
  }
  finally {
    IOUtils.closeStream(errStream);
    System.setErr(savedErr);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test Chown 1. Create and write file on FS 2. Verify that exit code for
 * Chown on existing file is 0 3. Verify that exit code for Chown on
 * non-existing file is 1 4. Verify that exit code for Chown with glob input
 * on non-existing file is 1 5. Verify that exit code for Chown with glob
 * input on existing file is 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChown() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChown/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChown/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChown/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChown/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChown/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChown/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChown/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// change(expectedExitCode, owner, group, paths...) — presumably runs the
// chown/chgrp shell command and asserts its exit code; defined elsewhere
// in this file (TODO confirm signature).
change(0,"admin",null,f1);
change(1,"admin",null,f2);
change(1,"admin",null,f3);
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// Glob matching three existing files succeeds.
change(0,"admin",null,f7);
// Owner together with a group argument, and with an empty group.
change(0,"admin","Test",f1);
change(0,"admin","",f1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception {
  Configuration conf=new Configuration();
  FsShell shell=new FsShell();
  shell.setConf(conf);
  // Capture stderr (the original local was confusingly named "out").
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream errStream=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  System.setErr(errStream);
  final String results;
  try {
    final Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy");
    fileSys.delete(tdir,true);
    fileSys.mkdirs(tdir);
    final String[] args={
      "-get",
      new Path(tdir.toUri().getPath(),"/invalidSrc").toString(),
      new Path(tdir.toUri().getPath(),"/invalidDst").toString()
    };
    assertTrue("file exists",!fileSys.exists(new Path(args[1])));
    assertTrue("file exists",!fileSys.exists(new Path(args[2])));
    final int run=shell.run(args);
    results=captured.toString();
    assertEquals("Return code should be 1",1,run);
    // The error text must name the missing path, never print "null".
    assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null"));
    assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory"));
  }
  finally {
    IOUtils.closeStream(errStream);
    System.setErr(savedErr);
  }
}
InternalCallVerifier BooleanVerifier
@Test(timeout=30000) public void testInvalidDefaultFS() throws Exception {
// An unresolvable fs.defaultFS must not break commands that name an
// explicit scheme ("-ls file:///").
FsShell shell=new FsShell();
Configuration conf=new Configuration();
conf.set(FS_DEFAULT_NAME_KEY,"hhhh://doesnotexist/");
shell.setConf(conf);
String[] args=new String[2];
args[0]="-ls";
args[1]="file:///";
// First run: result only printed, not asserted — presumably a warm-up to
// show the command completes; TODO confirm why its exit code is ignored.
int res=shell.run(args);
System.out.println("res =" + res);
shell.setConf(conf);
final ByteArrayOutputStream bytes=new ByteArrayOutputStream();
final PrintStream out=new PrintStream(bytes);
final PrintStream oldErr=System.err;
System.setErr(out);
final String results;
try {
// Second run with stderr captured: listing file:/// must succeed even
// though the default FS URI is bogus.
int run=shell.run(args);
results=bytes.toString();
LOG.info("result=" + results);
assertTrue("Return code should be 0",run == 0);
}
finally {
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception {
  Configuration conf=new Configuration();
  FsShell shell=new FsShell();
  shell.setConf(conf);
  // Capture stderr to check the error message for a non-matching glob.
  final ByteArrayOutputStream captured=new ByteArrayOutputStream();
  final PrintStream errStream=new PrintStream(captured);
  final PrintStream savedErr=System.err;
  System.setErr(errStream);
  final String results;
  try {
    // Without -f, a glob that matches nothing is an error (exit 1) ...
    final int rc=shell.run(new String[]{"-rm","nomatch*"});
    assertEquals(1,rc);
    // ... and the message names the offending pattern.
    results=captured.toString();
    assertTrue(results.contains("rm: `nomatch*': No such file or directory"));
  }
  finally {
    IOUtils.closeStream(errStream);
    System.setErr(savedErr);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test Chmod 1. Create and write file on FS 2. Verify that exit code for
 * chmod on existing file is 0 3. Verify that exit code for chmod on
 * non-existing file is 1 4. Verify that exit code for chmod with glob input
 * on non-existing file is 1 5. Verify that exit code for chmod with glob
 * input on existing file is 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChmod() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChmod/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChmod/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChmod/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChmod/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChmod/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChmod/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChmod/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// Existing file: chmod succeeds.
String argv[]={"-chmod","777",f1};
assertEquals(0,fsShell.run(argv));
// Missing file: chmod fails.
String argv2[]={"-chmod","777",f2};
assertEquals(1,fsShell.run(argv2));
// Glob matching nothing: chmod fails.
String argv3[]={"-chmod","777",f3};
assertEquals(1,fsShell.run(argv3));
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// Glob matching three existing files: chmod succeeds.
String argv4[]={"-chmod","777",f7};
assertEquals(0,fsShell.run(argv4));
}
InternalCallVerifier BooleanVerifier
/**
 * Test Chgrp 1. Create and write file on FS 2. Verify that exit code for
 * chgrp on existing file is 0 3. Verify that exit code for chgrp on
 * non-existing file is 1 4. Verify that exit code for chgrp with glob input
 * on non-existing file is 1 5. Verify that exit code for chgrp with glob
 * input on existing file is 0
 * @throws Exception
 */
@Test(timeout=30000) public void testChgrp() throws Exception {
Path p1=new Path(TEST_ROOT_DIR,"testChgrp/fileExists");
final String f1=p1.toUri().getPath();
final String f2=new Path(TEST_ROOT_DIR,"testChgrp/fileDoesNotExist").toUri().getPath();
final String f3=new Path(TEST_ROOT_DIR,"testChgrp/nonExistingfiles*").toUri().getPath();
final Path p4=new Path(TEST_ROOT_DIR,"testChgrp/file1");
final Path p5=new Path(TEST_ROOT_DIR,"testChgrp/file2");
final Path p6=new Path(TEST_ROOT_DIR,"testChgrp/file3");
final String f7=new Path(TEST_ROOT_DIR,"testChgrp/file*").toUri().getPath();
writeFile(fileSys,p1);
assertTrue(fileSys.exists(p1));
// change(expectedExitCode, owner, group, paths...) — presumably runs the
// chgrp shell command and asserts its exit code (TODO confirm signature).
change(0,null,"admin",f1);
change(1,null,"admin",f2);
// A failing path makes the whole command fail even when another path exists.
change(1,null,"admin",f2,f1);
change(1,null,"admin",f3);
change(1,null,"admin",f3,f1);
writeFile(fileSys,p4);
assertTrue(fileSys.exists(p4));
writeFile(fileSys,p5);
assertTrue(fileSys.exists(p5));
writeFile(fileSys,p6);
assertTrue(fileSys.exists(p6));
// Glob matching three existing files succeeds ...
change(0,null,"admin",f7);
// ... but still fails when combined with a missing path.
change(1,null,"admin",f2,f7);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testInterrupt() throws Exception {
// An interrupted command must stop processing and exit with 130
// (128 + SIGINT), mirroring Unix shell convention.
MyFsShell shell=new MyFsShell();
shell.setConf(new Configuration());
final Path d=new Path(TEST_ROOT_DIR,"testInterrupt");
final Path f1=new Path(d,"f1");
final Path f2=new Path(d,"f2");
assertTrue(fileSys.mkdirs(d));
writeFile(fileSys,f1);
assertTrue(fileSys.isFile(f1));
writeFile(fileSys,f2);
assertTrue(fileSys.isFile(f2));
// Two file args: InterruptCommand (defined elsewhere in this file) counts
// processed paths in a static field and interrupts after the first one.
int exitCode=shell.run(new String[]{"-testInterrupt",f1.toString(),f2.toString()});
assertEquals(1,InterruptCommand.processed);
assertEquals(130,exitCode);
// Directory arg: recursion processes one more entry before interrupting
// (the static counter is cumulative across the two runs).
exitCode=shell.run(new String[]{"-testInterrupt",d.toString()});
assertEquals(2,InterruptCommand.processed);
assertEquals(130,exitCode);
}
InternalCallVerifier NullVerifier
@Test public void testMultiGlob() throws IOException {
// Builds a small directory tree under USER_DIR, then drives globStatus
// through a long series of patterns. checkStatus(status, expected...)
// asserts the exact match set; assertNull covers literal paths that do
// not exist (globStatus returns null for a non-wildcard miss).
FileStatus[] status;
// dir1: subdir1{f1,f2}, subdir2{f1}
Path d1=new Path(USER_DIR,"dir1");
Path d11=new Path(d1,"subdir1");
Path d12=new Path(d1,"subdir2");
Path f111=new Path(d11,"f1");
fs.createNewFile(f111);
Path f112=new Path(d11,"f2");
fs.createNewFile(f112);
Path f121=new Path(d12,"f1");
fs.createNewFile(f121);
// dir2: empty subdir1, subdir2{f1}
Path d2=new Path(USER_DIR,"dir2");
Path d21=new Path(d2,"subdir1");
fs.mkdirs(d21);
Path d22=new Path(d2,"subdir2");
Path f221=new Path(d22,"f1");
fs.createNewFile(f221);
// dir3 deliberately mixes files and dirs with the same naming patterns:
// f1 is a file, f2 is a dir, subdir2 is a FILE, subdir3 is a dir whose
// child "f1" is itself a dir containing a file.
Path d3=new Path(USER_DIR,"dir3");
Path f31=new Path(d3,"f1");
fs.createNewFile(f31);
Path d32=new Path(d3,"f2");
fs.mkdirs(d32);
Path f32=new Path(d3,"subdir2");
fs.createNewFile(f32);
Path d33=new Path(d3,"subdir3");
Path f333=new Path(d33,"f3");
fs.createNewFile(f333);
Path d331=new Path(d33,"f1");
Path f3311=new Path(d331,"f1");
fs.createNewFile(f3311);
// dir4: empty
Path d4=new Path(USER_DIR,"dir4");
fs.mkdirs(d4);
Path root=new Path(USER_DIR);
status=fs.globStatus(root);
checkStatus(status,root);
// Literal non-existent paths (absolute and relative): null result.
status=fs.globStatus(new Path(USER_DIR,"x"));
assertNull(status);
status=fs.globStatus(new Path("x"));
assertNull(status);
status=fs.globStatus(new Path(USER_DIR,"x/x"));
assertNull(status);
status=fs.globStatus(new Path("x/x"));
assertNull(status);
// Wildcards: a miss yields an EMPTY array, not null.
status=fs.globStatus(new Path(USER_DIR,"*"));
checkStatus(status,d1,d2,d3,d4);
status=fs.globStatus(new Path("*"));
checkStatus(status,d1,d2,d3,d4);
status=fs.globStatus(new Path(USER_DIR,"*/x"));
checkStatus(status);
status=fs.globStatus(new Path("*/x"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"x/*"));
checkStatus(status);
status=fs.globStatus(new Path("x/*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"x/x/x/*"));
checkStatus(status);
status=fs.globStatus(new Path("x/x/x/*"));
checkStatus(status);
// Two-level wildcards match both files and directories at that depth.
status=fs.globStatus(new Path(USER_DIR,"*/*"));
checkStatus(status,d11,d12,d21,d22,f31,d32,f32,d33);
status=fs.globStatus(new Path("*/*"));
checkStatus(status,d11,d12,d21,d22,f31,d32,f32,d33);
status=fs.globStatus(new Path(USER_DIR,"dir*/*"));
checkStatus(status,d11,d12,d21,d22,f31,d32,f32,d33);
status=fs.globStatus(new Path("dir*/*"));
checkStatus(status,d11,d12,d21,d22,f31,d32,f32,d33);
// Note f32 ("dir3/subdir2") is a file but still matches "subdir*".
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*"));
checkStatus(status,d11,d12,d21,d22,f32,d33);
status=fs.globStatus(new Path("dir*/subdir*"));
checkStatus(status,d11,d12,d21,d22,f32,d33);
status=fs.globStatus(new Path(USER_DIR,"dir*/f*"));
checkStatus(status,f31,d32);
status=fs.globStatus(new Path("dir*/f*"));
checkStatus(status,f31,d32);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1"));
checkStatus(status,d11,d21);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/*"));
checkStatus(status,f111,f112);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/*/*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/x"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/x*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir2"));
checkStatus(status,d12,d22,f32);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir2/*"));
checkStatus(status,f121,f221);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir2/*/*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir3"));
checkStatus(status,d33);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir3/*"));
checkStatus(status,d331,f333);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir3/*/*"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir3/*/*/*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/f1"));
checkStatus(status,f111);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/f1*"));
checkStatus(status,f111);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/f1/*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/f1*/*"));
checkStatus(status);
// "f1" under subdir* matches files AND the directory d331 (dir3/subdir3/f1).
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1"));
checkStatus(status,f111,f121,f221,d331);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1*"));
checkStatus(status,f111,f121,f221,d331);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1/*"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1*/*"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1*/*"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1*/x"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f1*/*/*"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*"));
checkStatus(status,d11,d12,d21,d22,f32,d33);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/*"));
checkStatus(status,f111,f112,f121,f221,d331,f333);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f*"));
checkStatus(status,f111,f112,f121,f221,d331,f333);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/f*/*"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/*/f1"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir*/*/*"));
checkStatus(status,f3311);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/f3"));
checkStatus(status);
status=fs.globStatus(new Path(USER_DIR,"dir*/subdir1/f3*"));
checkStatus(status);
// Brace-expansion alternatives {a,b}.
status=fs.globStatus(new Path("{x}"));
checkStatus(status);
status=fs.globStatus(new Path("{x,y}"));
checkStatus(status);
status=fs.globStatus(new Path("dir*/{x,y}"));
checkStatus(status);
status=fs.globStatus(new Path("dir*/{f1,y}"));
checkStatus(status,f31);
status=fs.globStatus(new Path("{x,y}"));
checkStatus(status);
status=fs.globStatus(new Path("/{x/x,y/y}"));
checkStatus(status);
status=fs.globStatus(new Path("{x/x,y/y}"));
checkStatus(status);
// "." resolves to the user's home/working directory.
status=fs.globStatus(new Path(Path.CUR_DIR));
checkStatus(status,new Path(USER_DIR));
status=fs.globStatus(new Path(USER_DIR + "{/dir1}"));
checkStatus(status,d1);
status=fs.globStatus(new Path(USER_DIR + "{/dir*}"));
checkStatus(status,d1,d2,d3,d4);
// Same patterns with an accept-everything filter: results unchanged.
status=fs.globStatus(new Path(Path.SEPARATOR),trueFilter);
checkStatus(status,new Path(Path.SEPARATOR));
status=fs.globStatus(new Path(Path.CUR_DIR),trueFilter);
checkStatus(status,new Path(USER_DIR));
status=fs.globStatus(d1,trueFilter);
checkStatus(status,d1);
status=fs.globStatus(new Path(USER_DIR),trueFilter);
checkStatus(status,new Path(USER_DIR));
status=fs.globStatus(new Path(USER_DIR,"*"),trueFilter);
checkStatus(status,d1,d2,d3,d4);
status=fs.globStatus(new Path("/x/*"),trueFilter);
checkStatus(status);
status=fs.globStatus(new Path("/x"),trueFilter);
assertNull(status);
status=fs.globStatus(new Path("/x/x"),trueFilter);
assertNull(status);
// Reject-everything filter: wildcard matches become empty, literal
// existing paths become null (filtered out), literal misses stay null.
PathFilter falseFilter=new PathFilter(){
@Override public boolean accept( Path path){
return false;
}
}
;
status=fs.globStatus(new Path(Path.SEPARATOR),falseFilter);
assertNull(status);
status=fs.globStatus(new Path(Path.CUR_DIR),falseFilter);
assertNull(status);
status=fs.globStatus(new Path(USER_DIR),falseFilter);
assertNull(status);
status=fs.globStatus(new Path(USER_DIR,"*"),falseFilter);
checkStatus(status);
status=fs.globStatus(new Path("/x/*"),falseFilter);
checkStatus(status);
status=fs.globStatus(new Path("/x"),falseFilter);
assertNull(status);
status=fs.globStatus(new Path("/x/x"),falseFilter);
assertNull(status);
cleanupDFS();
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises quota setting, file deletion and deprecated-API rename with
 * OVERWRITE, then restarts the cluster so the name node must replay the
 * resulting edits log successfully.
 */
@Test public void testEditsLogRename() throws Exception {
  DistributedFileSystem dfs=cluster.getFileSystem();
  Path source=getTestRootPath(fc,"testEditsLogRename/srcdir/src1");
  Path dest=getTestRootPath(fc,"testEditsLogRename/dstdir/dst1");
  createFile(source);
  dfs.mkdirs(dest.getParent());
  createFile(dest);
  // Tight namespace quota plus delete/rename makes the edit log interesting.
  dfs.setQuota(dest.getParent(),2,HdfsConstants.QUOTA_DONT_SET);
  dfs.delete(dest,true);
  rename(source,dest,true,true,false,Rename.OVERWRITE);
  // Restart forces the name node to apply the edits recorded above.
  restartCluster();
  dfs=cluster.getFileSystem();
  source=getTestRootPath(fc,"testEditsLogRename/srcdir/src1");
  dest=getTestRootPath(fc,"testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(dfs.exists(source));
  Assert.assertTrue(dfs.exists(dest));
}
InternalCallVerifier BooleanVerifier
/**
 * Same scenario as testEditsLogRename but driven through the old rename
 * API: quota, delete, rename, then a cluster restart to verify the edits
 * log replays cleanly.
 */
@Test public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem dfs=cluster.getFileSystem();
  Path source=getTestRootPath(fc,"testEditsLogOldRename/srcdir/src1");
  Path dest=getTestRootPath(fc,"testEditsLogOldRename/dstdir/dst1");
  createFile(source);
  dfs.mkdirs(dest.getParent());
  createFile(dest);
  // Tight namespace quota plus delete/rename makes the edit log interesting.
  dfs.setQuota(dest.getParent(),2,HdfsConstants.QUOTA_DONT_SET);
  dfs.delete(dest,true);
  oldRename(source,dest,true,false);
  // Restart forces the name node to apply the edits recorded above.
  restartCluster();
  dfs=cluster.getFileSystem();
  source=getTestRootPath(fc,"testEditsLogOldRename/srcdir/src1");
  dest=getTestRootPath(fc,"testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(dfs.exists(source));
  Assert.assertTrue(dfs.exists(dest));
}
InternalCallVerifier BooleanVerifier
@Test public void testPositiveListFilesNotEndInColon() throws Exception {
  // Qualifying a har path must keep the "har://file-localhost/" prefix
  // intact (no stray colon at the authority/path boundary).
  final URI harUri=new URI("har://file-localhost" + harPath.toString());
  harFileSystem.initialize(harUri,conf);
  final Path unqualified=new Path("har://file-localhost" + harPath.toString());
  final Path qualified=harFileSystem.makeQualified(unqualified);
  assertTrue(qualified.toUri().toString().startsWith("har://file-localhost/"));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListLocatedStatus() throws Exception {
  // listLocatedStatus on a directory inside the bundled test.har archive
  // must enumerate exactly its two files, each once.
  String testHarPath=this.getClass().getResource("/test.har").getPath();
  URI uri=new URI("har://" + testHarPath);
  HarFileSystem hfs=new HarFileSystem(localFileSystem);
  hfs.initialize(uri,new Configuration());
  // Raw Set/HashSet carried no type information; the elements are file names.
  Set<String> expectedFileNames=new HashSet<String>();
  expectedFileNames.add("1.txt");
  expectedFileNames.add("2.txt");
  Path path=new Path("dir1");
  // listLocatedStatus returns RemoteIterator<LocatedFileStatus>; the raw
  // iterator forced an implicit use of Object before.
  RemoteIterator<LocatedFileStatus> fileList=hfs.listLocatedStatus(path);
  while (fileList.hasNext()) {
    String fileName=fileList.next().getPath().getName();
    assertTrue(fileName + " not in expected files list",expectedFileNames.contains(fileName));
    // Remove so a duplicate listing of the same name would fail above.
    expectedFileNames.remove(fileName);
  }
  assertEquals("Didn't find all of the expected file names: " + expectedFileNames,0,expectedFileNames.size());
}
InternalCallVerifier EqualityVerifier
@Test public void testPositiveHarFileSystemBasics() throws Exception {
  // Version and URI of the pre-built har file system.
  assertEquals(HarFileSystem.VERSION,harFileSystem.getHarVersion());
  final URI uri=harFileSystem.getUri();
  assertEquals(harPath.toUri().getPath(),uri.getPath());
  assertEquals("har",uri.getScheme());
  // The home directory mirrors the archive root ...
  final Path home=harFileSystem.getHomeDirectory();
  assertEquals(harPath.toUri().getPath(),home.toUri().getPath());
  final Path initialWorkDir=harFileSystem.getWorkingDirectory();
  assertEquals(home,initialWorkDir);
  // ... and the working directory cannot be changed: the setter is a no-op.
  harFileSystem.setWorkingDirectory(new Path("/foo/bar"));
  assertEquals(initialWorkDir,harFileSystem.getWorkingDirectory());
}
UtilityVerifier InternalCallVerifier
@Test public void testNegativeGetHarVersionOnNotInitializedFS() throws Exception {
  // getHarVersion() must reject a file system that was never initialize()d.
  final HarFileSystem uninitialized=new HarFileSystem(localFileSystem);
  try {
    final int version=uninitialized.getHarVersion();
    Assert.fail("Exception expected, but got a Har version " + version + ".");
  }
  catch ( IOException expected) {
    // expected: querying the version before initialize() is an error
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testPositiveNewHarFsOnTheSameUnderlyingFs() throws Exception {
  // A second HarFileSystem over the same archive must hit the metadata
  // cache and share the exact same metadata instance.
  final HarFileSystem hfs=new HarFileSystem(localFileSystem);
  final URI uri=new URI("har://" + harPath.toString());
  hfs.initialize(uri,new Configuration());
  // assertSame expresses the reference-identity check directly and reports
  // both operands on failure, unlike assertTrue(a == b).
  assertSame(harFileSystem.getMetadata(),hfs.getMetadata());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testMakeQualifiedPath() throws Exception {
  // makeQualified() must preserve the userinfo and port of the har URI.
  // (URI.getPath() already returns a String; the redundant toString() is gone.)
  String harPathWithUserinfo="har://file-user:passwd@localhost:80" + harPath.toUri().getPath();
  Path path=new Path(harPathWithUserinfo);
  Path qualifiedPath=path.getFileSystem(conf).makeQualified(path);
  // assertEquals reports expected vs. actual on failure, replacing the
  // hand-built String.format message around assertTrue(equals).
  assertEquals(harPathWithUserinfo,qualifiedPath.toString());
}
InternalCallVerifier BooleanVerifier
@Test public void testPositiveLruMetadataCacheFs() throws Exception {
  // First instance over the same archive shares cached metadata.
  HarFileSystem hfs=new HarFileSystem(localFileSystem);
  URI uri=new URI("har://" + harPath.toString());
  hfs.initialize(uri,new Configuration());
  // assertSame/assertNotSame state the reference-identity intent directly.
  assertSame(harFileSystem.getMetadata(),hfs.getMetadata());
  // Create more archives than the LRU cache holds so the original entry
  // is evicted. (Static constant accessed via the class, not an instance.)
  for (int i=0; i <= HarFileSystem.METADATA_CACHE_ENTRIES_DEFAULT; i++) {
    Path p=new Path(rootPath,"path1/path2/my" + i + ".har");
    createHarFileSystem(conf,p);
  }
  // After eviction a fresh instance must load fresh metadata.
  hfs=new HarFileSystem(localFileSystem);
  uri=new URI("har://" + harPath.toString());
  hfs.initialize(uri,new Configuration());
  assertNotSame(harFileSystem.getMetadata(),hfs.getMetadata());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test when input path is a file
*/
@Test public void testFile() throws IOException {
fs.mkdirs(TEST_DIR);
writeFile(fs,FILE1,FILE_LEN);
RemoteIterator itor=fs.listFiles(FILE1,true);
LocatedFileStatus stat=itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN,stat.getLen());
assertEquals(fs.makeQualified(FILE1),stat.getPath());
assertEquals(1,stat.getBlockLocations().length);
itor=fs.listFiles(FILE1,false);
stat=itor.next();
assertFalse(itor.hasNext());
assertTrue(stat.isFile());
assertEquals(FILE_LEN,stat.getLen());
assertEquals(fs.makeQualified(FILE1),stat.getPath());
assertEquals(1,stat.getBlockLocations().length);
fs.delete(FILE1,true);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input path is a directory: listFiles yields only files (never
 * directories), recursively when requested, in unspecified order.
 */
@Test public void testDirectory() throws IOException {
  fs.mkdirs(DIR1);
  // Empty directory: nothing to iterate, recursive or not.
  // (listFiles returns RemoteIterator<LocatedFileStatus>; raw types fixed.)
  RemoteIterator<LocatedFileStatus> itor=fs.listFiles(DIR1,true);
  assertFalse(itor.hasNext());
  itor=fs.listFiles(DIR1,false);
  assertFalse(itor.hasNext());
  // One file in the directory.
  writeFile(fs,FILE2,FILE_LEN);
  itor=fs.listFiles(DIR1,true);
  LocatedFileStatus stat=itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN,stat.getLen());
  assertEquals(fs.makeQualified(FILE2),stat.getPath());
  assertEquals(1,stat.getBlockLocations().length);
  itor=fs.listFiles(DIR1,false);
  stat=itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN,stat.getLen());
  assertEquals(fs.makeQualified(FILE2),stat.getPath());
  assertEquals(1,stat.getBlockLocations().length);
  // Three files across the tree; recursive listing finds each exactly once,
  // in no guaranteed order, so we tick them off a set.
  writeFile(fs,FILE1,FILE_LEN);
  writeFile(fs,FILE3,FILE_LEN);
  Set<Path> filesToFind=new HashSet<Path>();
  filesToFind.add(fs.makeQualified(FILE1));
  filesToFind.add(fs.makeQualified(FILE2));
  filesToFind.add(fs.makeQualified(FILE3));
  itor=fs.listFiles(TEST_DIR,true);
  stat=itor.next();
  assertTrue(stat.isFile());
  assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath()));
  stat=itor.next();
  assertTrue(stat.isFile());
  assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath()));
  stat=itor.next();
  assertTrue(stat.isFile());
  assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath()));
  assertFalse(itor.hasNext());
  assertTrue(filesToFind.isEmpty());
  // Non-recursive: only the file directly under TEST_DIR is returned.
  itor=fs.listFiles(TEST_DIR,false);
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fs.makeQualified(FILE1),stat.getPath());
  assertFalse(itor.hasNext());
  fs.delete(TEST_DIR,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Creates TRIALS temp files across two buffer directories and verifies
 * that every file landed in one of the two configured directories.
 */
@Test(timeout=30000) public void testCreateManyFiles() throws Exception {
  if (isWindows) return;
  String dir5=buildBufferDir(ROOT,5);
  String dir6=buildBufferDir(ROOT,6);
  try {
    conf.set(CONTEXT,dir5 + "," + dir6);
    assertTrue(localFs.mkdirs(new Path(dir5)));
    assertTrue(localFs.mkdirs(new Path(dir6)));
    int inDir5=0, inDir6=0;
    for (int i=0; i < TRIALS; ++i) {
      File result=createTempFile();
      // Classify each file by which buffer dir its path falls under.
      if (result.getPath().startsWith(new Path(dir5,FILENAME).toUri().getPath())) {
        inDir5++;
      }
      else if (result.getPath().startsWith(new Path(dir6,FILENAME).toUri().getPath())) {
        inDir6++;
      }
      result.delete();
    }
    // assertEquals reports the actual split on failure, unlike a bare
    // boolean assert.
    assertEquals(TRIALS,inDir5 + inDir6);
  }
  finally {
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String,Configuration)}
 * returns correct filenames and "file" schema, that an exhausted iterator
 * throws {@link NoSuchElementException}, and that the iterator rejects
 * {@code remove()}.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetAllLocalPathsToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir0)));
    assertTrue(localFs.mkdirs(new Path(dir1)));
    localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
    localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));
    final Iterable<Path> pathIterable=dirAllocator.getAllLocalPathsToRead(FILENAME,conf);
    int count=0;
    for ( final Path p : pathIterable) {
      count++;
      assertEquals(FILENAME,p.getName());
      assertEquals("file",p.getFileSystem(conf).getUri().getScheme());
    }
    // One copy of the file exists in each of the two buffer dirs.
    assertEquals(2,count);
    // The iterable was fully consumed above; a fresh next() on the same
    // (exhausted) iterator must throw.
    try {
      Path p=pathIterable.iterator().next();
      fail("NoSuchElementException must be thrown, but returned [" + p + "] instead.");
    }
    catch ( NoSuchElementException nsee) {
      // expected
    }
    // The returned iterator must be read-only.
    final Iterable<Path> pathIterable2=dirAllocator.getAllLocalPathsToRead(FILENAME,conf);
    final Iterator<Path> it=pathIterable2.iterator();
    try {
      it.remove();
      fail("UnsupportedOperationException must be thrown");
    }
    catch ( UnsupportedOperationException uoe) {
      // expected
    }
  }
  finally {
    Shell.execCommand(new String[]{"chmod","u+w",BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Verify that allocating a temp file leaves no residue behind: after
 * removing the file and its parent directory, the configured buffer
 * directory itself must be gone. Guards against File objects being
 * mistakenly built from fully qualified path strings.
 * @throws IOException
 */
@Test(timeout=30000) public void testNoSideEffects() throws IOException {
  assumeTrue(!isWindows);
  final String bufferDir=buildBufferDir(ROOT,0);
  try {
    conf.set(CONTEXT,bufferDir);
    final File tmpFile=dirAllocator.createTmpFileForWrite(FILENAME,-1,conf);
    // Remove the file, then its parent; both removals must succeed.
    assertTrue(tmpFile.delete());
    assertTrue(tmpFile.getParentFile().delete());
    // Nothing may remain at the configured buffer directory path.
    assertFalse(new File(bufferDir).exists());
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test getLocalPathToRead() returns correct filename and "file" schema.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetLocalPathToRead() throws IOException {
assumeTrue(!isWindows);
String dir=buildBufferDir(ROOT,0);
try {
conf.set(CONTEXT,dir);
assertTrue(localFs.mkdirs(new Path(dir)));
// Write a temp file through the allocator, then resolve it back by name.
File f1=dirAllocator.createTmpFileForWrite(FILENAME,SMALL_FILE_SIZE,conf);
Path p1=dirAllocator.getLocalPathToRead(f1.getName(),conf);
assertEquals(f1.getName(),p1.getName());
// The resolved path must belong to the local "file" filesystem.
assertEquals("file",p1.getFileSystem(conf).getUri().getScheme());
}
finally {
// Restore write permission on the buffer root before removing it.
Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Two buffer dirs. Both exists and on a R/W disk.
 * Later disk1 becomes read-only.
 * @throws Exception
 */
@Test(timeout=30000) public void testRWBufferDirBecomesRO() throws Exception {
if (isWindows) return;
String dir3=buildBufferDir(ROOT,3);
String dir4=buildBufferDir(ROOT,4);
try {
conf.set(CONTEXT,dir3 + "," + dir4);
assertTrue(localFs.mkdirs(new Path(dir3)));
assertTrue(localFs.mkdirs(new Path(dir4)));
createTempFile(SMALL_FILE_SIZE);
// Figure out which of the two dirs the allocator will pick next.
int nextDirIdx=(dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
validateTempDirCreation(buildBufferDir(ROOT,nextDirIdx));
// Make dir4 read-only; subsequent allocations must fall back to dir3.
new File(new Path(dir4).toUri().getPath()).setReadOnly();
validateTempDirCreation(dir3);
validateTempDirCreation(dir3);
}
finally {
rmBufferDirs();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Two buffer dirs. The first dir does not exist & is on a read-only disk;
 * The second dir exists & is RW
 * getLocalPathForWrite with checkAccess set to false should create a parent
 * directory. With checkAccess true, the directory should not be created.
 * @throws IOException
 */
@Test(timeout=30000) public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0=buildBufferDir(ROOT,0);
  String dir1=buildBufferDir(ROOT,1);
  try {
    conf.set(CONTEXT,dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    // Default overload: the parent directory is created on demand.
    Path p1=dirAllocator.getLocalPathForWrite("p1/x",SMALL_FILE_SIZE,conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
    // checkWrite=false: the parent directory must NOT be created, so the
    // status lookup has to fail.
    Path p2=dirAllocator.getLocalPathForWrite("p2/x",SMALL_FILE_SIZE,conf,false);
    try {
      localFs.getFileStatus(p2.getParent());
      fail("p2's parent directory should not have been created");
    }
    catch ( FileNotFoundException e) {
      // expected
    }
  }
  finally {
    Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}
InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
// Verifies that a file name containing ':' (illegal on Windows) is listed
// correctly with its full absolute path on non-Windows platforms.
@Test(timeout=1000) public void testListStatusWithColons() throws IOException {
assumeTrue(!Shell.WINDOWS);
File colonFile=new File(TEST_ROOT_DIR,"foo:bar");
colonFile.mkdirs();
FileStatus[] stats=fileSys.listStatus(new Path(TEST_ROOT_DIR));
assertEquals("Unexpected number of stats",1,stats.length);
assertEquals("Bad path from stat",colonFile.getAbsolutePath(),stats[0].getPath().toUri().getPath());
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier
// Exercises ChecksumFileSystem.reportChecksumFailure(): after reporting, the
// corrupted data file and its .crc sibling must be moved out of dir2 into a
// "bad files" directory created beside it under dir1, preserving lengths.
@Test(timeout=10000) public void testReportChecksumFailure() throws IOException {
base.mkdirs();
assertTrue(base.exists() && base.isDirectory());
final File dir1=new File(base,"dir1");
final File dir2=new File(dir1,"dir2");
dir2.mkdirs();
assertTrue(dir2.exists() && FileUtil.canWrite(dir2));
final String dataFileName="corruptedData";
final Path dataPath=new Path(new File(dir2,dataFileName).toURI());
final Path checksumPath=fileSys.getChecksumFile(dataPath);
final FSDataOutputStream fsdos=fileSys.create(dataPath);
try {
fsdos.writeUTF("foo");
}
finally {
fsdos.close();
}
// Both the data file and its checksum sibling must exist and be non-empty.
assertTrue(fileSys.pathToFile(dataPath).exists());
final long dataFileLength=fileSys.getFileStatus(dataPath).getLen();
assertTrue(dataFileLength > 0);
assertTrue(fileSys.pathToFile(checksumPath).exists());
final long checksumFileLength=fileSys.getFileStatus(checksumPath).getLen();
assertTrue(checksumFileLength > 0);
// NOTE(review): base is made non-writable before reporting -- presumably to
// exercise the move under restricted permissions; confirm intent.
FileUtil.setWritable(base,false);
FSDataInputStream dataFsdis=fileSys.open(dataPath);
FSDataInputStream checksumFsdis=fileSys.open(checksumPath);
boolean retryIsNecessary=fileSys.reportChecksumFailure(dataPath,dataFsdis,0,checksumFsdis,0);
assertTrue(!retryIsNecessary);
// The originals must be gone after the failure was reported.
assertTrue(!fileSys.pathToFile(dataPath).exists());
assertTrue(!fileSys.pathToFile(checksumPath).exists());
// dir1 must now contain exactly one entry besides dir2: the bad-files dir.
File[] dir1files=dir1.listFiles(new FileFilter(){
@Override public boolean accept( File pathname){
return pathname != null && !pathname.getName().equals("dir2");
}
}
);
assertTrue(dir1files != null);
assertTrue(dir1files.length == 1);
File badFilesDir=dir1files[0];
File[] badFiles=badFilesDir.listFiles();
assertTrue(badFiles != null);
assertTrue(badFiles.length == 2);
boolean dataFileFound=false;
boolean checksumFileFound=false;
// Both moved files keep their original lengths.
for ( File badFile : badFiles) {
if (badFile.getName().startsWith(dataFileName)) {
assertTrue(dataFileLength == badFile.length());
dataFileFound=true;
}
else if (badFile.getName().contains(dataFileName + ".crc")) {
assertTrue(checksumFileLength == badFile.length());
checksumFileFound=true;
}
}
assertTrue(dataFileFound);
assertTrue(checksumFileFound);
}
InternalCallVerifier EqualityVerifier
/** The filesystem's home directory must match the JVM user's home dir. */
@Test(timeout=1000) public void testHomeDirectory() throws IOException {
  final Path expectedHome=new Path(System.getProperty("user.home")).makeQualified(fileSys);
  assertEquals(expectedHome,fileSys.getHomeDirectory());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that renaming a directory replaces the destination if the destination
 * is an existing empty directory.
 * Before:
 * /dir1
 * /file1
 * /file2
 * /dir2
 * After rename("/dir1", "/dir2"):
 * /dir2
 * /file1
 * /file2
 */
@Test public void testRenameReplaceExistingEmptyDirectory() throws IOException {
Path src=new Path(TEST_ROOT_DIR,"dir1");
Path dst=new Path(TEST_ROOT_DIR,"dir2");
// Clean slate in case a previous run left state behind.
fileSys.delete(src,true);
fileSys.delete(dst,true);
assertTrue(fileSys.mkdirs(src));
writeFile(fileSys,new Path(src,"file1"),1);
writeFile(fileSys,new Path(src,"file2"),1);
assertTrue(fileSys.mkdirs(dst));
assertTrue(fileSys.rename(src,dst));
// The destination now holds the source's children; the source is gone.
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.exists(new Path(dst,"file1")));
assertTrue(fileSys.exists(new Path(dst,"file2")));
assertFalse(fileSys.exists(src));
}
InternalCallVerifier BooleanVerifier
/**
 * Test deleting a file, directory, and non-existent path
 */
@Test(timeout=1000) public void testBasicDelete() throws IOException {
Path dir1=new Path(TEST_ROOT_DIR,"dir1");
Path file1=new Path(TEST_ROOT_DIR,"file1");
Path file2=new Path(TEST_ROOT_DIR + "/dir1","file2");
Path file3=new Path(TEST_ROOT_DIR,"does-not-exist");
assertTrue(fileSys.mkdirs(dir1));
writeFile(fileSys,file1,1);
writeFile(fileSys,file2,1);
// Deleting a missing path must report false rather than throw.
assertFalse("Returned true deleting non-existant path",fileSys.delete(file3));
assertTrue("Did not delete file",fileSys.delete(file1));
// NOTE(review): the single-argument delete() is used on a non-empty dir
// (dir1 still contains file2), so it must behave recursively here --
// confirm against the FileSystem API version in use.
assertTrue("Did not delete non-empty dir",fileSys.delete(dir1));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Creates a file of random size and checks both getFileStatus and
// getContentSummary report that size; then verifies mkdirs rejects a path
// through an existing file and a null argument.
@Test(timeout=1000) public void testCreateFileAndMkdirs() throws IOException {
Path test_dir=new Path(TEST_ROOT_DIR,"test_dir");
Path test_file=new Path(test_dir,"file1");
assertTrue(fileSys.mkdirs(test_dir));
// Random size in [1, 2^20] exercises arbitrary file lengths.
final int fileSize=new Random().nextInt(1 << 20) + 1;
writeFile(fileSys,test_file,fileSize);
{
final FileStatus status=fileSys.getFileStatus(test_file);
Assert.assertEquals(fileSize,status.getLen());
final ContentSummary summary=fileSys.getContentSummary(test_dir);
Assert.assertEquals(fileSize,summary.getLength());
}
// mkdirs through an existing file must fail with ParentNotDirectoryException.
Path bad_dir=new Path(test_file,"another_dir");
try {
fileSys.mkdirs(bad_dir);
fail("Failed to detect existing file in path");
}
catch ( ParentNotDirectoryException e) {
// expected
}
// A null path must be rejected up front.
try {
fileSys.mkdirs(null);
fail("Failed to detect null in mkdir arg");
}
catch ( IllegalArgumentException e) {
// expected
}
}
InternalCallVerifier BooleanVerifier
/**
 * Renaming an existing empty directory moves it to the destination and
 * removes the source.
 */
@Test public void testRenameDirectory() throws IOException {
  final Path source=new Path(TEST_ROOT_DIR,"dir1");
  final Path target=new Path(TEST_ROOT_DIR,"dir2");
  // Start from a clean slate in case earlier runs left state behind.
  fileSys.delete(source,true);
  fileSys.delete(target,true);
  assertTrue(fileSys.mkdirs(source));
  assertTrue(fileSys.rename(source,target));
  assertTrue(fileSys.exists(target));
  assertFalse(fileSys.exists(source));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Exercises FileUtil.copy with every combination of deleteSource/overwrite,
// including copying into a directory and the existing-destination error case.
@Test(timeout=10000) public void testCopy() throws IOException {
Path src=new Path(TEST_ROOT_DIR,"dingo");
Path dst=new Path(TEST_ROOT_DIR,"yak");
writeFile(fileSys,src,1);
// deleteSource=true: src is removed after the copy.
assertTrue(FileUtil.copy(fileSys,src,fileSys,dst,true,false,conf));
assertTrue(!fileSys.exists(src) && fileSys.exists(dst));
// deleteSource=false: both endpoints remain.
assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,false,conf));
assertTrue(fileSys.exists(src) && fileSys.exists(dst));
// deleteSource=true + overwrite=true onto an existing destination.
assertTrue(FileUtil.copy(fileSys,src,fileSys,dst,true,true,conf));
assertTrue(!fileSys.exists(src) && fileSys.exists(dst));
// Copying a file into a directory places it under dst's name inside src.
fileSys.mkdirs(src);
assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,false,conf));
Path tmp=new Path(src,dst.getName());
assertTrue(fileSys.exists(tmp) && fileSys.exists(dst));
assertTrue(FileUtil.copy(fileSys,dst,fileSys,src,false,true,conf));
assertTrue(fileSys.delete(tmp,true));
fileSys.mkdirs(tmp);
// Copying a file over an existing directory of the same name must fail.
try {
FileUtil.copy(fileSys,dst,fileSys,src,true,true,conf);
fail("Failed to detect existing dir");
}
catch ( IOException e) {
// expected
}
}
APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
// Windows-only: listing a path given without a drive specifier must still
// yield stat paths consistent with the created file's path.
@Test public void testListStatusReturnConsistentPathOnWindows() throws IOException {
assumeTrue(Shell.WINDOWS);
String dirNoDriveSpec=TEST_ROOT_DIR;
// Strip a leading drive specifier such as "C:" if present.
if (dirNoDriveSpec.charAt(1) == ':') dirNoDriveSpec=dirNoDriveSpec.substring(2);
File file=new File(dirNoDriveSpec,"foo");
file.mkdirs();
FileStatus[] stats=fileSys.listStatus(new Path(dirNoDriveSpec));
assertEquals("Unexpected number of stats",1,stats.length);
assertEquals("Bad path from stat",new Path(file.getPath()).toUri().getPath(),stats[0].getPath().toUri().getPath());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that renaming a directory to an existing directory that is not empty
 * results in a full copy of source to destination.
 * Before:
 * /dir1
 * /dir2
 * /dir3
 * /file1
 * /file2
 * After rename("/dir1/dir2/dir3", "/dir1"):
 * /dir1
 * /dir3
 * /file1
 * /file2
 */
@Test public void testRenameMoveToExistingNonEmptyDirectory() throws IOException {
Path src=new Path(TEST_ROOT_DIR,"dir1/dir2/dir3");
Path dst=new Path(TEST_ROOT_DIR,"dir1");
// Clean slate in case a previous run left state behind. Creating src
// below implicitly recreates dst (its ancestor).
fileSys.delete(src,true);
fileSys.delete(dst,true);
assertTrue(fileSys.mkdirs(src));
writeFile(fileSys,new Path(src,"file1"),1);
writeFile(fileSys,new Path(src,"file2"),1);
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.rename(src,dst));
// dir3 and its files have moved under dst; src no longer exists.
assertTrue(fileSys.exists(dst));
assertTrue(fileSys.exists(new Path(dst,"dir3")));
assertTrue(fileSys.exists(new Path(dst,"dir3/file1")));
assertTrue(fileSys.exists(new Path(dst,"dir3/file2")));
assertFalse(fileSys.exists(src));
}
InternalCallVerifier EqualityVerifier
/** A '%' in a file name must survive a getFileStatus round trip intact. */
@Test(timeout=1000) public void testPathEscapes() throws IOException {
  final Path percentPath=new Path(TEST_ROOT_DIR,"foo%bar");
  writeFile(fileSys,percentPath,1);
  final FileStatus fileStat=fileSys.getFileStatus(percentPath);
  assertEquals(percentPath.makeQualified(fileSys),fileStat.getPath());
  cleanupFile(fileSys,percentPath);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the capability of setting the working directory.
 */
@Test(timeout=10000) public void testWorkingDirectory() throws IOException {
Path origDir=fileSys.getWorkingDirectory();
Path subdir=new Path(TEST_ROOT_DIR,"new");
try {
assertTrue(!fileSys.exists(subdir));
assertTrue(fileSys.mkdirs(subdir));
assertTrue(fileSys.isDirectory(subdir));
fileSys.setWorkingDirectory(subdir);
// Relative paths below resolve against the new working directory.
Path dir1=new Path("dir1");
assertTrue(fileSys.mkdirs(dir1));
assertTrue(fileSys.isDirectory(dir1));
fileSys.delete(dir1,true);
assertTrue(!fileSys.exists(dir1));
Path file1=new Path("file1");
Path file2=new Path("sub/file2");
String contents=writeFile(fileSys,file1,1);
// Local FS: copyFromLocalFile keeps the source; copyToLocalFile too.
fileSys.copyFromLocalFile(file1,file2);
assertTrue(fileSys.exists(file1));
assertTrue(fileSys.isFile(file1));
cleanupFile(fileSys,file2);
fileSys.copyToLocalFile(file1,file2);
cleanupFile(fileSys,file2);
// rename moves the file; round-trip back before reading it.
fileSys.rename(file1,file2);
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
fileSys.rename(file2,file1);
InputStream stm=fileSys.open(file1);
byte[] buffer=new byte[3];
int bytesRead=stm.read(buffer,0,3);
assertEquals(contents,new String(buffer,0,bytesRead));
stm.close();
}
finally {
// Always restore the original working directory for later tests.
fileSys.setWorkingDirectory(origDir);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// resolvePath must drop a URI fragment ("#glacier") and return the plain
// qualified path of the underlying file.
@Test public void testStripFragmentFromPath() throws Exception {
FileSystem fs=FileSystem.getLocal(new Configuration());
Path pathQualified=TEST_PATH.makeQualified(fs.getUri(),fs.getWorkingDirectory());
Path pathWithFragment=new Path(new URI(pathQualified.toString() + "#glacier"));
FileSystemTestHelper.createFile(fs,pathWithFragment);
Path resolved=fs.resolvePath(pathWithFragment);
assertEquals("resolvePath did not strip fragment from Path",pathQualified,resolved);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// setTimes with atime=-1 must change only the modification time and leave
// the access time untouched.
@Test(timeout=1000) public void testSetTimes() throws Exception {
Path path=new Path(TEST_ROOT_DIR,"set-times");
writeFile(fileSys,path,1);
long newModTime=12345000;
FileStatus status=fileSys.getFileStatus(path);
assertTrue("check we're actually changing something",newModTime != status.getModificationTime());
long accessTime=status.getAccessTime();
// -1 means "leave the access time unchanged".
fileSys.setTimes(path,newModTime,-1);
status=fileSys.getFileStatus(path);
assertEquals(newModTime,status.getModificationTime());
assertEquals(accessTime,status.getAccessTime());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Round-trips URIs with fragments through Path and checks how fragments and
// relative vs. absolute child paths combine in the two-argument Path ctor.
@Test(timeout=30000) public void testURI() throws URISyntaxException, IOException {
URI uri=new URI("file:///bar#baz");
Path path=new Path(uri);
// Path must preserve the fragment through toString and qualification.
assertTrue(uri.equals(new URI(path.toString())));
FileSystem fs=path.getFileSystem(new Configuration());
assertTrue(uri.equals(new URI(fs.makeQualified(path).toString())));
URI uri2=new URI("file:///bar/baz");
assertTrue(uri2.equals(new URI(fs.makeQualified(new Path(uri2)).toString())));
// The child's fragment wins; an absolute child replaces the parent's path.
assertEquals("foo://bar/baz#boo",new Path("foo://bar/",new Path(new URI("/baz#boo"))).toString());
assertEquals("foo://bar/baz/fud#boo",new Path(new Path(new URI("foo://bar/baz#bud")),new Path(new URI("fud#boo"))).toString());
assertEquals("foo://bar/fud#boo",new Path(new Path(new URI("foo://bar/baz#bud")),new Path(new URI("/fud#boo"))).toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Tests glob handling when a literal '*' appears in a path component:
// unescaped globs expand, backslash-escaped globs match the literal name.
@Test(timeout=30000) public void testGlobEscapeStatus() throws Exception {
if (Shell.WINDOWS) return;
FileSystem lfs=FileSystem.getLocal(new Configuration());
Path testRoot=lfs.makeQualified(new Path(System.getProperty("test.build.data","test/build/data"),"testPathGlob"));
lfs.delete(testRoot,true);
lfs.mkdirs(testRoot);
assertTrue(lfs.isDirectory(testRoot));
lfs.setWorkingDirectory(testRoot);
// One directory is literally named "*"; two are d1 and d2, each holding f.
Path paths[]=new Path[]{new Path(testRoot,"*/f"),new Path(testRoot,"d1/f"),new Path(testRoot,"d2/f")};
Arrays.sort(paths);
for ( Path p : paths) {
lfs.create(p).close();
assertTrue(lfs.exists(p));
}
// listStatus does not glob: "*" is the literal directory name.
FileStatus stats[]=lfs.listStatus(new Path(testRoot,"*"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*/f"),stats[0].getPath());
// globStatus on "*" expands to all three directories.
stats=lfs.globStatus(new Path(testRoot,"*"));
Arrays.sort(stats);
Path parentPaths[]=new Path[paths.length];
for (int i=0; i < paths.length; i++) {
parentPaths[i]=paths[i].getParent();
}
assertEquals(mergeStatuses(parentPaths),mergeStatuses(stats));
// Escaped "\*" matches only the literal "*" directory.
stats=lfs.globStatus(new Path(testRoot,"\\*"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*"),stats[0].getPath());
stats=lfs.globStatus(new Path(testRoot,"*/f"));
assertEquals(paths.length,stats.length);
assertEquals(mergeStatuses(paths),mergeStatuses(stats));
stats=lfs.globStatus(new Path(testRoot,"\\*/f"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*/f"),stats[0].getPath());
stats=lfs.globStatus(new Path(testRoot,"\\*/*"));
assertEquals(1,stats.length);
assertEquals(new Path(testRoot,"*/f"),stats[0].getPath());
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests resolution of an hdfs symlink to the local file system.
 * @throws IOException
 * @throws InterruptedException
 */
@Test public void testFcResolveAfs() throws IOException, InterruptedException {
Configuration conf=new Configuration();
FileContext fcLocal=FileContext.getLocalFSFileContext();
FileContext fcHdfs=FileContext.getFileContext(cluster.getFileSystem().getUri());
final String localTestRoot=helper.getAbsoluteTestRootDir(fcLocal);
// Create a local file, then an HDFS symlink pointing at its directory.
Path alphaLocalPath=new Path(fcLocal.getDefaultFileSystem().getUri().toString(),new File(localTestRoot,"alpha").getAbsolutePath());
DFSTestUtil.createFile(FileSystem.getLocal(conf),alphaLocalPath,16,(short)1,2);
Path linkTarget=new Path(fcLocal.getDefaultFileSystem().getUri().toString(),localTestRoot);
Path hdfsLink=new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),"/tmp/link");
fcHdfs.createSymlink(linkTarget,hdfsLink,true);
Path alphaHdfsPathViaLink=new Path(fcHdfs.getDefaultFileSystem().getUri().toString() + "/tmp/link/alpha");
// Resolving through the link must touch exactly two filesystems:
// the HDFS default FS and the local default FS.
Set afsList=fcHdfs.resolveAbstractFileSystems(alphaHdfsPathViaLink);
Assert.assertEquals(2,afsList.size());
for ( AbstractFileSystem afs : afsList) {
if ((!afs.equals(fcHdfs.getDefaultFileSystem())) && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
Assert.fail("Failed to resolve AFS correctly");
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Exercises the Stat-based FileStatus probe: a directory must be reported
 * as a directory, while a symlink to it must not be.
 */
@Test(timeout=10000) public void testStat() throws Exception {
  Assume.assumeTrue(Stat.isAvailable());
  FileSystem fs=FileSystem.getLocal(new Configuration());
  Path testDir=new Path(getTestRootPath(fs),"teststat");
  fs.mkdirs(testDir);
  Path sub1=new Path(testDir,"sub1");
  Path sub2=new Path(testDir,"sub2");
  fs.mkdirs(sub1);
  fs.createSymlink(sub1,sub2,false);
  // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit 1.
  // NOTE(review): the third Stat argument appears to control symlink
  // dereferencing -- confirm against the Stat API.
  FileStatus stat1=new Stat(sub1,4096L,false,fs).getFileStatus();
  FileStatus stat2=new Stat(sub2,0,false,fs).getFileStatus();
  assertTrue(stat1.isDirectory());
  assertFalse(stat2.isDirectory());
  fs.delete(testDir,true);
}
InternalCallVerifier EqualityVerifier
// A symlink in the test filesystem pointing at a file:// target must be
// readable and report the target's length.
@Test(timeout=10000) public void testLinkAcrossFileSystems() throws IOException {
Path localDir=new Path("file://" + wrapper.getAbsoluteTestRootDir() + "/test");
Path localFile=new Path("file://" + wrapper.getAbsoluteTestRootDir() + "/test/file");
Path link=new Path(testBaseDir1(),"linkToFile");
FSTestWrapper localWrapper=wrapper.getLocalFSWrapper();
localWrapper.delete(localDir,true);
localWrapper.mkdir(localDir,FileContext.DEFAULT_PERM,true);
localWrapper.setWorkingDirectory(localDir);
assertEquals(localDir,localWrapper.getWorkingDirectory());
createAndWriteFile(localWrapper,localFile);
// Link lives in the test FS, target lives on the local FS.
wrapper.createSymlink(localFile,link,false);
readFile(link);
assertEquals(fileSize,wrapper.getFileStatus(link).getLen());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// setPermission/setOwner applied to a symlink must affect the link target
// (file and directory cases) while leaving the link's own status untouched.
@Test(timeout=10000) public void testSetPermissionAffectsTarget() throws IOException {
Path file=new Path(testBaseDir1(),"file");
Path dir=new Path(testBaseDir2());
Path linkToFile=new Path(testBaseDir1(),"linkToFile");
Path linkToDir=new Path(testBaseDir1(),"linkToDir");
createAndWriteFile(file);
wrapper.createSymlink(file,linkToFile,false);
wrapper.createSymlink(dir,linkToDir,false);
// File target: the link's own permission stays as it was...
FsPermission perms=wrapper.getFileLinkStatus(linkToFile).getPermission();
wrapper.setPermission(linkToFile,new FsPermission((short)0664));
wrapper.setOwner(linkToFile,"user","group");
assertEquals(perms,wrapper.getFileLinkStatus(linkToFile).getPermission());
// ...while the target picks up the new permission and ownership.
FileStatus stat=wrapper.getFileStatus(file);
assertEquals(0664,stat.getPermission().toShort());
assertEquals("user",stat.getOwner());
assertEquals("group",stat.getGroup());
assertEquals(stat.getPermission(),wrapper.getFileStatus(linkToFile).getPermission());
// Directory target: same contract.
perms=wrapper.getFileLinkStatus(linkToDir).getPermission();
wrapper.setPermission(linkToDir,new FsPermission((short)0664));
wrapper.setOwner(linkToDir,"user","group");
assertEquals(perms,wrapper.getFileLinkStatus(linkToDir).getPermission());
stat=wrapper.getFileStatus(dir);
assertEquals(0664,stat.getPermission().toShort());
assertEquals("user",stat.getOwner());
assertEquals("group",stat.getGroup());
assertEquals(stat.getPermission(),wrapper.getFileStatus(linkToDir).getPermission());
}
BranchVerifier InternalCallVerifier EqualityVerifier
// A symlink to "/" must allow reaching any file via the link-prefixed
// absolute path.
@Test(timeout=10000) public void testCreateLinkToSlash() throws IOException {
Path dir=new Path(testBaseDir1());
Path file=new Path(testBaseDir1(),"file");
Path link=new Path(testBaseDir1(),"linkToSlash");
// The original file path replayed through the link to root.
Path fileViaLink=new Path(testBaseDir1() + "/linkToSlash" + testBaseDir1()+ "/file");
createAndWriteFile(file);
wrapper.setWorkingDirectory(dir);
wrapper.createSymlink(new Path("/"),link,false);
readFile(fileViaLink);
assertEquals(fileSize,wrapper.getFileStatus(fileViaLink).getLen());
if (wrapper instanceof FileContextTestWrapper) {
FSTestWrapper localWrapper=wrapper.getLocalFSWrapper();
// The fully qualified link path must also resolve through the cluster URI.
Path linkQual=new Path(cluster.getURI(0).toString(),fileViaLink);
assertEquals(fileSize,localWrapper.getFileStatus(linkQual).getLen());
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Creates a symlink whose absolute path length is exactly
 * HdfsConstants.MAX_PATH_LENGTH, then verifies that one more character
 * is rejected with an IOException.
 */
@Test(timeout=10000) public void testCreateLinkMaxPathLink() throws IOException {
  Path dir=new Path(testBaseDir1());
  Path file=new Path(testBaseDir1(),"file");
  final int maxPathLen=HdfsConstants.MAX_PATH_LENGTH;
  // +1 accounts for the separator between the dir and the link name.
  final int dirLen=dir.toString().length() + 1;
  int len=maxPathLen - dirLen;
  // Build a link name of exactly len characters (presize the builder).
  StringBuilder sb=new StringBuilder(len);
  for (int i=0; i < (len / 10); i++) {
    sb.append("0123456789");
  }
  for (int i=0; i < (len % 10); i++) {
    sb.append("x");
  }
  Path link=new Path(sb.toString());
  assertEquals(maxPathLen,dirLen + link.toString().length());
  createAndWriteFile(file);
  wrapper.setWorkingDirectory(dir);
  wrapper.createSymlink(file,link,false);
  readFile(link);
  // One character over the limit must be rejected.
  link=new Path(sb.toString() + "x");
  try {
    wrapper.createSymlink(file,link,false);
    fail("Path name should be too long");
  }
  catch ( IOException x) {
    // expected
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * A dangling symlink (target missing) must: fail getFileStatus and read
 * attempts with FileNotFoundException, still be visible via
 * getFileLinkStatus with correct owner/group/target, and become readable
 * once the target is created.
 */
@Test(timeout=1000) public void testDanglingLink() throws IOException {
  assumeTrue(!Path.WINDOWS);
  Path fileAbs=new Path(testBaseDir1() + "/file");
  Path fileQual=new Path(testURI().toString(),fileAbs);
  Path link=new Path(testBaseDir1() + "/linkToFile");
  Path linkQual=new Path(testURI().toString(),link.toString());
  // Create the link, remove it out-of-band, and recreate it dangling.
  wrapper.createSymlink(fileAbs,link,false);
  FileUtil.fullyDelete(new File(link.toUri().getPath()));
  wrapper.createSymlink(fileAbs,link,false);
  try {
    wrapper.getFileStatus(link);
    fail("Got FileStatus for dangling link");
  }
  catch ( FileNotFoundException f) {
    // expected
  }
  // The link itself is still stat-able via getFileLinkStatus.
  UserGroupInformation user=UserGroupInformation.getCurrentUser();
  FileStatus fsd=wrapper.getFileLinkStatus(link);
  assertEquals(fileQual,fsd.getSymlink());
  assertTrue(fsd.isSymlink());
  assertFalse(fsd.isDirectory());
  assertEquals(user.getUserName(),fsd.getOwner());
  assertEquals(user.getGroupNames()[0],fsd.getGroup());
  assertEquals(linkQual,fsd.getPath());
  try {
    readFile(link);
    // Message fixed: this branch is about reading, not getFileStatus.
    fail("Successfully read dangling link");
  }
  catch ( FileNotFoundException f) {
    // expected
  }
  // Once the target exists, the link resolves normally.
  createAndWriteFile(fileAbs);
  wrapper.getFileStatus(link);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 *
 * First generate a file with some content through the FileSystem API, then
 * try to open and read the file through the URL stream API.
 * @throws IOException
 */
@Test public void testDfsUrls() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
FsUrlStreamHandlerFactory factory=new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
// NOTE(review): java.net.URL allows setURLStreamHandlerFactory only once
// per JVM -- this test cannot coexist with another factory-setting test
// in the same process; confirm test isolation.
java.net.URL.setURLStreamHandlerFactory(factory);
Path filePath=new Path("/thefile");
try {
// Write 1024 bytes of a deterministic pattern through the FileSystem API.
byte[] fileContent=new byte[1024];
for (int i=0; i < fileContent.length; ++i) fileContent[i]=(byte)i;
OutputStream os=fs.create(filePath);
os.write(fileContent);
os.close();
// Read the same file back through an hdfs:// URL stream.
URI uri=fs.getUri();
URL fileURL=new URL(uri.getScheme(),uri.getHost(),uri.getPort(),filePath.toString());
InputStream is=fileURL.openStream();
assertNotNull(is);
byte[] bytes=new byte[4096];
assertEquals(1024,is.read(bytes));
is.close();
for (int i=0; i < fileContent.length; ++i) assertEquals(fileContent[i],bytes[i]);
fs.delete(filePath,false);
}
finally {
fs.close();
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// XATTR and XATTR1 must share a hash code; each adjacent distinct pair
// must hash differently.
@Test public void testXAttrHashCode(){
assertEquals(XATTR.hashCode(),XATTR1.hashCode());
assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/** setOwner on a directory must update its owner and keep it a directory. */
@Test public void testSetOwnerOnFolder() throws Exception {
  final Path folder=new Path("testOwner");
  assertTrue(fs.mkdirs(folder));
  // null group: only the owner is being changed.
  fs.setOwner(folder,"newUser",null);
  final FileStatus folderStatus=fs.getFileStatus(folder);
  assertNotNull(folderStatus);
  assertEquals("newUser",folderStatus.getOwner());
  assertTrue(folderStatus.isDirectory());
}
InternalCallVerifier BooleanVerifier
// A folder's modification time must advance when a child is created in it,
// renamed out of it, renamed into another folder, or deleted.
@Test public void testFolderLastModifiedTime() throws Exception {
Path parentFolder=new Path("testFolder");
Path innerFile=new Path(parentFolder,"innerfile");
assertTrue(fs.mkdirs(parentFolder));
long lastModifiedTime=fs.getFileStatus(parentFolder).getModificationTime();
// Sleep past the clock tolerance so a changed mtime is distinguishable.
Thread.sleep(modifiedTimeErrorMargin + 1);
assertTrue(fs.createNewFile(innerFile));
// Creating a child must have bumped the parent's mtime.
assertFalse(testModifiedTime(parentFolder,lastModifiedTime));
testModifiedTime(parentFolder);
lastModifiedTime=fs.getFileStatus(parentFolder).getModificationTime();
Path destFolder=new Path("testDestFolder");
assertTrue(fs.mkdirs(destFolder));
long destLastModifiedTime=fs.getFileStatus(destFolder).getModificationTime();
Thread.sleep(modifiedTimeErrorMargin + 1);
Path destFile=new Path(destFolder,"innerfile");
// Renaming the child updates both the source and destination folders.
assertTrue(fs.rename(innerFile,destFile));
assertFalse(testModifiedTime(parentFolder,lastModifiedTime));
assertFalse(testModifiedTime(destFolder,destLastModifiedTime));
testModifiedTime(parentFolder);
testModifiedTime(destFolder);
destLastModifiedTime=fs.getFileStatus(destFolder).getModificationTime();
Thread.sleep(modifiedTimeErrorMargin + 1);
// Deleting the child updates the containing folder as well.
fs.delete(destFile,false);
assertFalse(testModifiedTime(destFolder,destLastModifiedTime));
testModifiedTime(destFolder);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/** getFileStatus must resolve a path ending in a "." component. */
@Test public void testListSlash() throws Exception {
  final Path folder=new Path("/testFolder");
  final Path file=new Path(folder,"testFile");
  assertTrue(fs.mkdirs(folder));
  assertTrue(fs.createNewFile(file));
  // "/testFolder/." must resolve to the folder itself.
  final FileStatus dotStatus=fs.getFileStatus(new Path("/testFolder/."));
  assertNotNull(dotStatus);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// File and directory names made entirely of URI-special characters must
// survive create, listStatus, getFileStatus, open and delete.
@Test public void testUriEncodingMoreComplexCharacters() throws Exception {
String fileName="!#$'()*;=[]%";
String directoryName="*;=[]%!#$'()";
fs.create(new Path(directoryName,fileName)).close();
FileStatus[] listing=fs.listStatus(new Path(directoryName));
assertEquals(1,listing.length);
assertEquals(fileName,listing[0].getPath().getName());
FileStatus status=fs.getFileStatus(new Path(directoryName,fileName));
assertEquals(fileName,status.getPath().getName());
InputStream stream=fs.open(new Path(directoryName,fileName));
assertNotNull(stream);
stream.close();
assertTrue(fs.delete(new Path(directoryName,fileName),true));
assertTrue(fs.delete(new Path(directoryName),true));
}
InternalCallVerifier BooleanVerifier
// Creating a file several levels deep must implicitly create all parent
// directories, which must carry the requested permission.
@Test public void testDeepFileCreation() throws Exception {
Path testFile=new Path("deep/file/creation/test");
// NOTE(review): 644 here is a decimal literal, not octal 0644. The test is
// self-consistent (it compares against the same value below), but confirm
// the literal is intentional.
FsPermission permission=FsPermission.createImmutable((short)644);
createEmptyFile(testFile,permission);
assertTrue(fs.exists(testFile));
assertTrue(fs.exists(new Path("deep")));
assertTrue(fs.exists(new Path("deep/file/creation")));
FileStatus ret=fs.getFileStatus(new Path("deep/file"));
assertTrue(ret.isDirectory());
assertEqualsIgnoreStickyBit(permission,ret.getPermission());
assertTrue(fs.delete(new Path("deep"),true));
assertFalse(fs.exists(testFile));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Renaming an implicitly created (deep) folder must carry its subtree and
// the file's permission to the new location.
@Test public void testRenameImplicitFolder() throws Exception {
Path testFile=new Path("deep/file/rename/test");
// NOTE(review): decimal 644, not octal 0644 -- see testDeepFileCreation;
// self-consistent but confirm intent.
FsPermission permission=FsPermission.createImmutable((short)644);
createEmptyFile(testFile,permission);
assertTrue(fs.rename(new Path("deep/file"),new Path("deep/renamed")));
assertFalse(fs.exists(testFile));
FileStatus newStatus=fs.getFileStatus(new Path("deep/renamed/rename/test"));
assertNotNull(newStatus);
assertEqualsIgnoreStickyBit(permission,newStatus.getPermission());
assertTrue(fs.delete(new Path("deep"),true));
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Renames a folder under each {@link RenameFolderVariation}: folder only,
 * inner file only, or both, verifying the destination exists and the
 * source is gone afterwards.
 */
@Test public void testRenameFolder() throws Exception {
for ( RenameFolderVariation variation : RenameFolderVariation.values()) {
Path originalFolder=new Path("folderToRename");
if (variation != RenameFolderVariation.CreateJustInnerFile) {
assertTrue(fs.mkdirs(originalFolder));
}
Path innerFile=new Path(originalFolder,"innerFile");
if (variation != RenameFolderVariation.CreateJustFolder) {
assertTrue(fs.createNewFile(innerFile));
}
Path destination=new Path("renamedFolder");
assertTrue(fs.rename(originalFolder,destination));
assertTrue(fs.exists(destination));
if (variation != RenameFolderVariation.CreateJustFolder) {
// The inner file must have moved along with its folder.
assertTrue(fs.exists(new Path(destination,innerFile.getName())));
}
assertFalse(fs.exists(originalFolder));
assertFalse(fs.exists(innerFile));
// Reset state for the next variation.
fs.delete(destination,true);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the FileSystem statistics counters track bytes written and read
 * through the "wasb" scheme, and that deleting the file does not change
 * the counters.
 */
@Test public void testStatistics() throws Exception {
FileSystem.clearStatistics();
FileSystem.Statistics stats=FileSystem.getStatistics("wasb",NativeAzureFileSystem.class);
assertEquals(0,stats.getBytesRead());
assertEquals(0,stats.getBytesWritten());
Path newFile=new Path("testStats");
writeString(newFile,"12345678");
// Eight bytes written, nothing read yet.
assertEquals(8,stats.getBytesWritten());
assertEquals(0,stats.getBytesRead());
String readBack=readString(newFile);
assertEquals("12345678",readBack);
assertEquals(8,stats.getBytesRead());
assertEquals(8,stats.getBytesWritten());
assertTrue(fs.delete(newFile,true));
// Deleting the file must leave the byte counters untouched.
assertEquals(8,stats.getBytesRead());
assertEquals(8,stats.getBytesWritten());
}
InternalCallVerifier BooleanVerifier
/**
 * Renames a file under each {@link RenameVariation} of source naming
 * (plain, nested, with a space, with '+'/'%'), checking rename fails while
 * the destination's parent is missing and succeeds once it exists.
 */
@Test public void testRename() throws Exception {
for ( RenameVariation variation : RenameVariation.values()) {
System.out.printf("Rename variation: %s\n",variation);
Path originalFile;
switch (variation) {
case NormalFileName:
originalFile=new Path("fileToRename");
break;
case SourceInAFolder:
originalFile=new Path("file/to/rename");
break;
case SourceWithSpace:
originalFile=new Path("file to rename");
break;
case SourceWithPlusAndPercent:
originalFile=new Path("file+to%rename");
break;
default :
throw new Exception("Unknown variation");
}
Path destinationFile=new Path("file/resting/destination");
assertTrue(fs.createNewFile(originalFile));
assertTrue(fs.exists(originalFile));
// Rename must fail while the destination's parent does not exist.
assertFalse(fs.rename(originalFile,destinationFile));
assertTrue(fs.mkdirs(destinationFile.getParent()));
assertTrue(fs.rename(originalFile,destinationFile));
assertTrue(fs.exists(destinationFile));
assertFalse(fs.exists(originalFile));
// Reset state for the next variation.
fs.delete(destinationFile.getParent(),true);
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Creates a folder with an inner file and verifies a fresh folder reports
 * directory status with the default 0755 permission, and that recursive
 * delete removes both folder and file.
 */
@Test public void testStoreDeleteFolder() throws Exception {
Path testFolder=new Path("storeDeleteFolder");
assertFalse(fs.exists(testFolder));
assertTrue(fs.mkdirs(testFolder));
assertTrue(fs.exists(testFolder));
FileStatus status=fs.getFileStatus(testFolder);
assertNotNull(status);
assertTrue(status.isDirectory());
assertEquals(new FsPermission((short)0755),status.getPermission());
Path innerFile=new Path(testFolder,"innerFile");
assertTrue(fs.createNewFile(innerFile));
assertTrue(fs.exists(innerFile));
// Recursive delete must remove the folder and everything inside it.
assertTrue(fs.delete(testFolder,true));
assertFalse(fs.exists(innerFile));
assertFalse(fs.exists(testFolder));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a small file through the store: write, check existence and
 * default 0644 permission, read it back, then remove it.
 */
@Test public void testStoreRetrieveFile() throws Exception {
  Path storedFile = new Path("unit-test-file");
  writeString(storedFile, "Testing");
  assertTrue(fs.exists(storedFile));
  FileStatus storedStatus = fs.getFileStatus(storedFile);
  assertNotNull(storedStatus);
  assertEquals(new FsPermission((short) 0644), storedStatus.getPermission());
  assertEquals("Testing", readString(storedFile));
  fs.delete(storedFile, true);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies a literal percent-escape ("%5F") in a file name is preserved
 * verbatim through create, list, rename and delete.
 */
@Test public void testUriEncoding() throws Exception {
  Path encodedFile = new Path("p/t%5Fe");
  fs.create(encodedFile).close();
  FileStatus[] children = fs.listStatus(new Path("p"));
  assertEquals(1, children.length);
  // The name must come back un-decoded.
  assertEquals("t%5Fe", children[0].getPath().getName());
  assertTrue(fs.rename(new Path("p"), new Path("q")));
  assertTrue(fs.delete(new Path("q"), true));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Copies a file from the local file system into WASB via
 * {@link FileUtil#copy} and verifies its content; the local temp file is
 * removed in a finally block either way.
 */
@Test public void testCopyFromLocalFileSystem() throws Exception {
Path localFilePath=new Path(System.getProperty("test.build.data","azure_test"));
FileSystem localFs=FileSystem.get(new Configuration());
// Start from a clean slate in case a previous run left the file behind.
localFs.delete(localFilePath,true);
try {
writeString(localFs,localFilePath,"Testing");
Path dstPath=new Path("copiedFromLocal");
assertTrue(FileUtil.copy(localFs,localFilePath,fs,dstPath,false,fs.getConf()));
assertTrue(fs.exists(dstPath));
assertEquals("Testing",readString(fs,dstPath));
fs.delete(dstPath,true);
}
finally {
localFs.delete(localFilePath,true);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies listStatus results as a folder tree grows: empty folder, then
 * one inner folder, then an inner file that must not surface in the
 * parent's listing.
 */
@Test public void testListDirectory() throws Exception {
Path rootFolder=new Path("testingList");
assertTrue(fs.mkdirs(rootFolder));
FileStatus[] listed=fs.listStatus(rootFolder);
assertEquals(0,listed.length);
Path innerFolder=new Path(rootFolder,"inner");
assertTrue(fs.mkdirs(innerFolder));
listed=fs.listStatus(rootFolder);
assertEquals(1,listed.length);
assertTrue(listed[0].isDirectory());
Path innerFile=new Path(innerFolder,"innerFile");
writeString(innerFile,"testing");
// Listing the root must still show only the inner folder, not its file.
listed=fs.listStatus(rootFolder);
assertEquals(1,listed.length);
assertTrue(listed[0].isDirectory());
listed=fs.listStatus(innerFolder);
assertEquals(1,listed.length);
assertFalse(listed[0].isDirectory());
assertTrue(fs.delete(rootFolder,true));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies setOwner can change owner and group independently: a null
 * group leaves the group untouched, and a null user leaves the owner
 * untouched.
 */
@Test public void testSetOwnerOnFile() throws Exception {
Path newFile=new Path("testOwner");
OutputStream output=fs.create(newFile);
output.write(13);
output.close();
// Change only the owner; group should remain the default.
fs.setOwner(newFile,"newUser",null);
FileStatus newStatus=fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals("newUser",newStatus.getOwner());
assertEquals("supergroup",newStatus.getGroup());
assertEquals(1,newStatus.getLen());
// Change only the group; the owner set above must be preserved.
fs.setOwner(newFile,null,"newGroup");
newStatus=fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals("newUser",newStatus.getOwner());
assertEquals("newGroup",newStatus.getGroup());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies setPermission on a directory is reflected by getFileStatus and
 * that the path still reports as a directory afterwards.
 */
@Test public void testSetPermissionOnFolder() throws Exception {
  Path folder = new Path("testPermission");
  assertTrue(fs.mkdirs(folder));
  FsPermission restricted = new FsPermission((short) 0600);
  fs.setPermission(folder, restricted);
  FileStatus status = fs.getFileStatus(folder);
  assertNotNull(status);
  assertEquals(restricted, status.getPermission());
  assertTrue(status.isDirectory());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies setPermission on a file is reflected in its status while the
 * owner, group and length stay unchanged.
 */
@Test public void testSetPermissionOnFile() throws Exception {
Path newFile=new Path("testPermission");
OutputStream output=fs.create(newFile);
output.write(13);
output.close();
FsPermission newPermission=new FsPermission((short)0700);
fs.setPermission(newFile,newPermission);
FileStatus newStatus=fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals(newPermission,newStatus.getPermission());
// Only the permission changed; everything else keeps its defaults.
assertEquals("supergroup",newStatus.getGroup());
assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),newStatus.getOwner());
assertEquals(1,newStatus.getLen());
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Writes NUMBER_OF_BLOCKS blocks directly through the store, then reads
 * the blob repeatedly while a concurrent out-of-band writer updates it,
 * verifying the final read sees the full expected length.
 */
@Test public void testReadOOBWrites() throws Exception {
byte[] dataBlockWrite=new byte[UPLOAD_BLOCK_SIZE];
byte[] dataBlockRead=new byte[UPLOAD_BLOCK_SIZE];
DataOutputStream outputStream=testAccount.getStore().storefile("WASB_String.txt",new PermissionStatus("","",FsPermission.getDefault()));
Arrays.fill(dataBlockWrite,(byte)255);
for (int i=0; i < NUMBER_OF_BLOCKS; i++) {
outputStream.write(dataBlockWrite);
}
outputStream.flush();
outputStream.close();
// Start an out-of-band writer that updates the same blob concurrently.
DataBlockWriter writeBlockTask=new DataBlockWriter(testAccount,"WASB_String.txt");
writeBlockTask.startWriting();
int count=0;
DataInputStream inputStream=null;
for (int i=0; i < 5; i++) {
try {
inputStream=testAccount.getStore().retrieve("WASB_String.txt",0);
count=0;
int c=0;
while (c >= 0) {
c=inputStream.read(dataBlockRead,0,UPLOAD_BLOCK_SIZE);
if (c < 0) {
break;
}
count+=c;
}
}
catch ( IOException e) {
// BUGFIX: e.getCause() may be null, and calling toString() on it would
// throw an NPE that masks the real failure; print the exception itself.
System.out.println(e.toString());
e.printStackTrace();
fail();
}
finally {
// BUGFIX: close the stream in a finally block so it is released even if
// an unexpected (non-IOException) error escapes the read loop.
if (null != inputStream) {
inputStream.close();
inputStream=null;
}
}
}
writeBlockTask.stopWriting();
assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE,count);
}
InternalCallVerifier BooleanVerifier
/**
 * Injects a transient failure into every HTTP DELETE request and verifies
 * that create and rename still succeed despite the injected errors.
 */
@Test public void testTransientErrorOnDelete() throws Exception {
AzureBlobStorageTestAccount testAccount=AzureBlobStorageTestAccount.create();
assumeNotNull(testAccount);
try {
NativeAzureFileSystem fs=testAccount.getFileSystem();
injectTransientError(fs,new ConnectionRecognizer(){
@Override public boolean isTargetConnection( HttpURLConnection connection){
// Only DELETE requests are made to fail.
return connection.getRequestMethod().equals("DELETE");
}
}
);
Path testFile=new Path("/a/b");
assertTrue(fs.createNewFile(testFile));
assertTrue(fs.rename(testFile,new Path("/x")));
}
finally {
testAccount.cleanup();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test public void testOldPermissionMetadata() throws Exception {
Path selfishFile=new Path("/noOneElse");
// Seed the backing store with a blob carrying legacy "asv_permission" metadata.
HashMap metadata=new HashMap();
metadata.put("asv_permission",getExpectedPermissionString("rw-------"));
backingStore.setContent(AzureBlobStorageTestAccount.toMockUri(selfishFile),new byte[]{},metadata);
FsPermission justMe=new FsPermission(FsAction.READ_WRITE,FsAction.NONE,FsAction.NONE);
// The legacy metadata must still be understood when reading status.
FileStatus retrievedStatus=fs.getFileStatus(selfishFile);
assertNotNull(retrievedStatus);
assertEquals(justMe,retrievedStatus.getPermission());
assertEquals(getExpectedOwner(),retrievedStatus.getOwner());
assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,retrievedStatus.getGroup());
FsPermission meAndYou=new FsPermission(FsAction.READ_WRITE,FsAction.READ_WRITE,FsAction.NONE);
fs.setPermission(selfishFile,meAndYou);
// After a write operation the metadata must be migrated to "hdi_permission"
// and the legacy key removed.
metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
assertNotNull(metadata);
String storedPermission=metadata.get("hdi_permission");
assertEquals(getExpectedPermissionString("rw-rw----"),storedPermission);
assertNull(metadata.get("asv_permission"));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies mkdirs stamps the folder-marker and permission metadata keys on
 * the backing blob.
 */
@Test public void testFolderMetadata() throws Exception {
  Path folder = new Path("/folder");
  FsPermission readOnly = new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ);
  fs.mkdirs(folder, readOnly);
  HashMap metadata = backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
  assertNotNull(metadata);
  assertEquals("true", metadata.get("hdi_isfolder"));
  assertEquals(getExpectedPermissionString("r--r--r--"), metadata.get("hdi_permission"));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Creates a file with owner-only permissions and verifies both the stored
 * "hdi_permission" metadata and the permission reported by getFileStatus.
 */
@SuppressWarnings("deprecation") @Test public void testPermissionMetadata() throws Exception {
FsPermission justMe=new FsPermission(FsAction.READ_WRITE,FsAction.NONE,FsAction.NONE);
Path selfishFile=new Path("/noOneElse");
fs.create(selfishFile,justMe,true,4096,fs.getDefaultReplication(),fs.getDefaultBlockSize(),null).close();
// The permission must be persisted as blob metadata.
HashMap metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(selfishFile));
assertNotNull(metadata);
String storedPermission=metadata.get("hdi_permission");
assertEquals(getExpectedPermissionString("rw-------"),storedPermission);
// And it must round-trip back through getFileStatus.
FileStatus retrievedStatus=fs.getFileStatus(selfishFile);
assertNotNull(retrievedStatus);
assertEquals(justMe,retrievedStatus.getPermission());
assertEquals(getExpectedOwner(),retrievedStatus.getOwner());
assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,retrievedStatus.getGroup());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata.
 */
@Test public void testFirstContainerVersionMetadata() throws Exception {
// Pre-create a container stamped with the legacy ASV version key.
HashMap containerMetadata=new HashMap();
containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,AzureNativeFileSystemStore.FIRST_WASB_VERSION);
FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create(containerMetadata);
// Read-only operations must not touch the container metadata.
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
// The first write operation should migrate the version stamp to the new key.
fsWithContainer.getFs().mkdirs(new Path("/dir"));
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
assertNull(fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
fsWithContainer.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test public void testPreExistingContainerVersionMetadata() throws Exception {
FsWithPreExistingContainer fsWithContainer=FsWithPreExistingContainer.create();
// Read-only operations leave the pre-existing container unstamped.
assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
assertEquals(0,fsWithContainer.getFs().listStatus(new Path("/")).length);
assertNull(fsWithContainer.getContainerMetadata());
// The first write operation must stamp the current WASB version.
fsWithContainer.getFs().mkdirs(new Path("/dir"));
assertNotNull(fsWithContainer.getContainerMetadata());
assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,fsWithContainer.getContainerMetadata().get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
fsWithContainer.close();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that WASB stamped the version in the container metadata.
 */
@Test public void testContainerVersionMetadata() throws Exception {
  // A write operation should cause the current WASB version to be stamped.
  fs.createNewFile(new Path("/foo"));
  HashMap stamped = backingStore.getContainerMetadata();
  assertNotNull(stamped);
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION, stamped.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that read operations against a not-yet-existing container do
 * not create it, and that only the first write (createNewFile) does.
 */
@Test public void testContainerCreateOnWrite() throws Exception {
testAccount=AzureBlobStorageTestAccount.create("",EnumSet.noneOf(CreateOptions.class));
assumeNotNull(testAccount);
CloudBlobContainer container=testAccount.getRealContainer();
FileSystem fs=testAccount.getFileSystem();
assertFalse(container.exists());
try {
fs.listStatus(new Path("/"));
// CLEANUP: fail(...) replaces the assertTrue("...",false) and
// assertFalse("...",true) idioms used here, matching the rest of the file.
fail("Should've thrown.");
}
catch ( FileNotFoundException ex) {
assertTrue("Unexpected exception: " + ex,ex.getMessage().contains("does not exist."));
}
assertFalse(container.exists());
try {
fs.open(new Path("/foo"));
fail("Should've thrown.");
}
catch ( FileNotFoundException ex) {
// Expected: the container (and thus the file) does not exist.
}
assertFalse(container.exists());
// A failed rename must not create the container either.
assertFalse(fs.rename(new Path("/foo"),new Path("/bar")));
assertFalse(container.exists());
// The first write operation finally creates the container.
assertTrue(fs.createNewFile(new Path("/foo")));
assertTrue(container.exists());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that with SAS credentials a write against a missing container
 * fails with an AzureException and leaves the container uncreated.
 */
@Test public void testContainerChecksWithSas() throws Exception {
testAccount=AzureBlobStorageTestAccount.create("",EnumSet.of(CreateOptions.UseSas));
assumeNotNull(testAccount);
CloudBlobContainer container=testAccount.getRealContainer();
FileSystem fs=testAccount.getFileSystem();
assertFalse(container.exists());
try {
fs.createNewFile(new Path("/foo"));
// CLEANUP: fail(...) replaces the confusing assertFalse("...",true) idiom.
fail("Should've thrown.");
}
catch ( AzureException ex) {
// Expected: the write cannot succeed against the missing container.
}
assertFalse(container.exists());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies that once the container is created out of band, a file system
 * that previously failed against the missing container starts working.
 */
@Test public void testContainerCreateAfterDoesNotExist() throws Exception {
testAccount=AzureBlobStorageTestAccount.create("",EnumSet.noneOf(CreateOptions.class));
assumeNotNull(testAccount);
CloudBlobContainer container=testAccount.getRealContainer();
FileSystem fs=testAccount.getFileSystem();
assertFalse(container.exists());
try {
// CLEANUP: listStatus always throws here, so the old assertNull(...)
// wrapper around the call was dead code; fail(...) also replaces the
// assertTrue("...",false) idiom.
fs.listStatus(new Path("/"));
fail("Should've thrown.");
}
catch ( FileNotFoundException ex) {
assertTrue("Unexpected exception: " + ex,ex.getMessage().contains("does not exist."));
}
assertFalse(container.exists());
// Create the container out of band; the same FS instance must now work.
container.create();
assertTrue(fs.createNewFile(new Path("/foo")));
assertTrue(container.exists());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that a blob uploaded out of band after a failed access becomes
 * visible through the file system once the container exists.
 */
@Test public void testContainerExistAfterDoesNotExist() throws Exception {
testAccount=AzureBlobStorageTestAccount.create("",EnumSet.noneOf(CreateOptions.class));
assumeNotNull(testAccount);
CloudBlobContainer container=testAccount.getRealContainer();
FileSystem fs=testAccount.getFileSystem();
assertFalse(container.exists());
try {
fs.listStatus(new Path("/"));
// CLEANUP: fail(...) replaces the assertTrue("...",false) idiom.
fail("Should've thrown.");
}
catch ( FileNotFoundException ex) {
assertTrue("Unexpected exception: " + ex,ex.getMessage().contains("does not exist."));
}
assertFalse(container.exists());
container.create();
// Upload a blob directly, bypassing the file system.
CloudBlockBlob blob=testAccount.getBlobReference("foo");
BlobOutputStream outputStream=blob.openOutputStream();
outputStream.write(new byte[10]);
outputStream.close();
assertTrue(fs.exists(new Path("/foo")));
assertTrue(container.exists());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Configures a 500-byte block size and verifies getFileStatus reports it
 * for a file larger than one block.
 */
@Test public void testNumberOfBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, "500");
  AzureBlobStorageTestAccount mockAccount = AzureBlobStorageTestAccount.createMock(conf);
  FileSystem mockFs = mockAccount.getFileSystem();
  Path testFile = createTestFile(mockFs, 1200);
  FileStatus stat = mockFs.getFileStatus(testFile);
  assertEquals(500, stat.getBlockSize());
  mockAccount.cleanup();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies an in-progress upload is tracked via a link-back metadata entry
 * pointing at an existing temporary blob, and that closing the stream
 * removes the link.
 */
@Test public void testLinkBlobs() throws Exception {
Path filePath=new Path("/inProgress");
FSDataOutputStream outputStream=fs.create(filePath);
HashMap metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
assertNotNull(metadata);
String linkValue=metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
assertNotNull(linkValue);
// The link must point at a temporary blob that actually exists.
assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri(linkValue)));
assertTrue(fs.exists(filePath));
outputStream.close();
// Closing the stream finalizes the upload and clears the link metadata.
metadata=backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(filePath));
assertNull(metadata.get(AzureNativeFileSystemStore.LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY));
}
InternalCallVerifier EqualityVerifier
/**
 * Test to make sure that we don't expose the temporary upload folder when
 * listing at the root.
 */
@Test public void testNoTempBlobsVisible() throws Exception {
Path filePath=new Path("/inProgress");
FSDataOutputStream outputStream=fs.create(filePath);
// While the upload is still open, only the real file should be listed.
FileStatus[] listOfRoot=fs.listStatus(new Path("/"));
assertEquals("Expected one file listed, instead got: " + toString(listOfRoot),1,listOfRoot.length);
assertEquals(fs.makeQualified(filePath),listOfRoot[0].getPath());
outputStream.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests running starting multiple threads all doing various File system
 * operations against the same FS.
 */
@Test public void testMultiThreadedOperation() throws Exception {
for (int iter=0; iter < 10; iter++) {
final int numThreads=20;
Thread[] threads=new Thread[numThreads];
final ConcurrentLinkedQueue exceptionsEncountered=new ConcurrentLinkedQueue();
for (int i=0; i < numThreads; i++) {
final Path threadLocalFile=new Path("/myFile" + i);
threads[i]=new Thread(new Runnable(){
@Override public void run(){
try {
// CLEANUP: assertFalse(...) replaces the assertTrue(!...) idiom.
assertFalse(fs.exists(threadLocalFile));
OutputStream output=fs.create(threadLocalFile);
output.write(5);
output.close();
assertTrue(fs.exists(threadLocalFile));
assertTrue(fs.listStatus(new Path("/")).length > 0);
}
catch ( Throwable ex) {
// Collect failures; assertions thrown on worker threads are invisible
// to JUnit, so they are re-checked on the main thread below.
exceptionsEncountered.add(ex);
}
}
}
);
}
for ( Thread t : threads) {
t.start();
}
for ( Thread t : threads) {
t.join();
}
assertTrue("Encountered exceptions: " + StringUtils.join("\r\n",selectToString(exceptionsEncountered)),exceptionsEncountered.isEmpty());
// Recreate the file system between iterations for a clean slate.
tearDown();
setUp();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies rename works for a normal target and that renaming to a path
 * containing ':' throws, leaving the source untouched.
 */
@Test public void testRename() throws Exception {
Path testFile1=new Path(root + "/testFile1");
assertTrue(fs.createNewFile(testFile1));
Path testFile2=new Path(root + "/testFile2");
fs.rename(testFile1,testFile2);
// CLEANUP: the old compound assertTrue(a && b) is split so a failure
// pinpoints which condition broke.
assertFalse(fs.exists(testFile1));
assertTrue(fs.exists(testFile2));
Path testFile3=new Path(root + "/testFile3:3");
try {
fs.rename(testFile2,testFile3);
fail("Should've thrown.");
}
catch ( IOException e) {
// Expected: the destination name contains ':'.
}
// The failed rename must not have moved or removed the source.
assertTrue(fs.exists(testFile2));
}
InternalCallVerifier BooleanVerifier
/**
 * Builds a small folder tree, checks fsck reports it clean, then plants a
 * blob with a ':' in its name directly in the backing store and checks
 * fsck flags it.
 */
@Test public void testWasbFsck() throws Exception {
Path testFolder1=new Path(root + "/testFolder1");
assertTrue(fs.mkdirs(testFolder1));
Path testFolder2=new Path(testFolder1,"testFolder2");
assertTrue(fs.mkdirs(testFolder2));
Path testFolder3=new Path(testFolder1,"testFolder3");
assertTrue(fs.mkdirs(testFolder3));
Path testFile1=new Path(testFolder2,"testFile1");
assertTrue(fs.createNewFile(testFile1));
Path testFile2=new Path(testFolder1,"testFile2");
assertTrue(fs.createNewFile(testFile2));
// A well-formed tree should produce no fsck findings.
assertFalse(runWasbFsck(testFolder1));
InMemoryBlockBlobStore backingStore=testAccount.getMockStorage().getBackingStore();
// Inject a blob whose name contains ':' directly into the backing store,
// bypassing the file system's name validation.
backingStore.setContent(AzureBlobStorageTestAccount.toMockUri("testFolder1/testFolder2/test2:2"),new byte[]{1,2},new HashMap());
assertTrue(runWasbFsck(testFolder1));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies a plain folder can be created while a folder name containing
 * ':' is rejected with an IOException.
 */
@Test public void testMkdirs() throws Exception {
  Path validFolder = new Path(root + "/testFolder1");
  assertTrue(fs.mkdirs(validFolder));
  Path invalidFolder = new Path(root + "/testFolder2:2");
  try {
    assertTrue(fs.mkdirs(invalidFolder));
    fail("Should've thrown.");
  }
  catch (IOException e) {
    // Expected: the folder name contains ':'.
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Deleting the only file under an implicit folder must leave the folder
 * itself still visible.
 */
@Test public void testFileInImplicitFolderDeleted() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  Path implicitFolder = new Path("/root");
  assertTrue(fs.exists(implicitFolder));
  assertTrue(fs.delete(new Path("/root/b"), true));
  assertTrue(fs.exists(implicitFolder));
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests that when we create the file (or folder) x/y/z, we also create
 * explicit folder blobs for x and x/y
 */
@Test public void testCreatingDeepFileCreatesExplicitFolder() throws Exception {
for ( DeepCreateTestVariation variation : DeepCreateTestVariation.values()) {
switch (variation) {
case File:
assertTrue(fs.createNewFile(new Path("/x/y/z")));
break;
case Folder:
assertTrue(fs.mkdirs(new Path("/x/y/z")));
break;
}
// Intermediate folders must exist as explicit blobs in the store.
assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri("x")));
assertTrue(backingStore.exists(AzureBlobStorageTestAccount.toMockUri("x/y")));
// Reset state for the next variation.
fs.delete(new Path("/x"),true);
}
}
InternalCallVerifier BooleanVerifier
/**
 * An implicit folder (materialized only by a child blob) can be deleted
 * recursively and is gone afterwards.
 */
@Test public void testImplicitFolderDeleted() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  Path implicitFolder = new Path("/root");
  assertTrue(fs.exists(implicitFolder));
  assertTrue(fs.delete(implicitFolder, true));
  assertFalse(fs.exists(implicitFolder));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies listing around an implicit folder (one that exists only because
 * a blob lives beneath it): the child is listed both from its own path and
 * from the implicit parent, and the parent reports as a directory.
 */
@Test public void testImplicitFolderListed() throws Exception {
createEmptyBlobOutOfBand("root/b");
FileStatus[] obtained=fs.listStatus(new Path("/root/b"));
assertNotNull(obtained);
assertEquals(1,obtained.length);
assertFalse(obtained[0].isDirectory());
assertEquals("/root/b",obtained[0].getPath().toUri().getPath());
// Listing the implicit parent yields the same single file.
obtained=fs.listStatus(new Path("/root"));
assertNotNull(obtained);
assertEquals(1,obtained.length);
assertFalse(obtained[0].isDirectory());
assertEquals("/root/b",obtained[0].getPath().toUri().getPath());
// The implicit parent itself reports as a directory.
FileStatus dirStatus=fs.getFileStatus(new Path("/root"));
assertNotNull(dirStatus);
assertTrue(dirStatus.isDirectory());
assertEquals("/root",dirStatus.getPath().toUri().getPath());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * setOwner on an implicit folder should take effect and be visible through
 * getFileStatus.
 */
@Test public void testSetOwnerOnImplicitFolder() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  Path implicitFolder = new Path("/root");
  fs.setOwner(implicitFolder, "newOwner", null);
  FileStatus updated = fs.getFileStatus(implicitFolder);
  assertNotNull(updated);
  assertEquals("newOwner", updated.getOwner());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * setPermission on an implicit folder should take effect and be visible
 * through getFileStatus.
 */
@Test public void testSetPermissionOnImplicitFolder() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  Path implicitFolder = new Path("/root");
  FsPermission restricted = new FsPermission((short) 0600);
  fs.setPermission(implicitFolder, restricted);
  FileStatus updated = fs.getFileStatus(implicitFolder);
  assertNotNull(updated);
  assertEquals(restricted, updated.getPermission());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a path whose parent blob is a file (not a folder) cannot
 * be resolved for deletion and fails with a descriptive AzureException.
 */
@Test public void testFileAndImplicitFolderSameName() throws Exception {
createEmptyBlobOutOfBand("root/b");
createEmptyBlobOutOfBand("root/b/c");
FileStatus[] listResult=fs.listStatus(new Path("/root/b"));
// "/root/b" resolves to the file blob: one entry, not a directory.
assertEquals(1,listResult.length);
assertFalse(listResult[0].isDirectory());
try {
fs.delete(new Path("/root/b/c"),true);
// CLEANUP: fail(...) replaces the assertTrue("...",false) idiom.
fail("Should've thrown.");
}
catch ( AzureException e) {
assertEquals("File /root/b/c has a parent directory /root/b" + " which is also a file. Can't resolve.",e.getMessage());
}
}
InternalCallVerifier BooleanVerifier
/**
 * A sibling ("uncle") folder can be created next to the parent chain of a
 * file that was uploaded out of band.
 */
@Test public void outOfBandFolder_uncleMkdirs() throws Exception {
  String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder1/a/input/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("testFolder1/a/input/file")));
  Path uncleFolder = new Path("testFolder1/a/output");
  assertTrue(fs.mkdirs(uncleFolder));
}
InternalCallVerifier BooleanVerifier
/**
 * A blob uploaded out of band under a first-level folder makes both the
 * folder and the file visible, and the folder can be deleted recursively.
 */
@Test public void outOfBandFolder_firstLevelFolderDelete() throws Exception {
  CloudBlockBlob blob = testAccount.getBlobReference("folderW/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("/folderW")));
  assertTrue(fs.exists(new Path("/folderW/file")));
  assertTrue(fs.delete(new Path("/folderW"), true));
}
InternalCallVerifier BooleanVerifier
/**
 * A blob uploaded out of band at the container root is visible through the
 * file system and can be deleted.
 */
@Test public void outOfBandFolder_rootFileDelete() throws Exception {
  CloudBlockBlob blob = testAccount.getBlobReference("fileY");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  Path rootFile = new Path("/fileY");
  assertTrue(fs.exists(rootFile));
  assertTrue(fs.delete(rootFile, true));
}
InternalCallVerifier BooleanVerifier
/**
 * The implicit parent folder of a blob uploaded out of band can be deleted
 * recursively through the file system.
 */
@Test public void outOfBandFolder_parentDelete() throws Exception {
  String workingDir = "user/" + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir + "testFolder2/a/input/file");
  BlobOutputStream stream = blob.openOutputStream();
  stream.close();
  assertTrue(fs.exists(new Path("testFolder2/a/input/file")));
  Path parentFolder = new Path("testFolder2/a/input");
  assertTrue(fs.delete(parentFolder, true));
}
InternalCallVerifier EqualityVerifier
/**
 * Configures a shell key-decryption script (Windows only) and verifies the
 * provider returns the script's output for the configured account key.
 */
@Test public void testValidScript() throws Exception {
  // Shell decryption scripts are only supported on Windows.
  if (!Shell.WINDOWS) {
    return;
  }
  String expectedResult = "decretedKey";
  // The script echoes its first argument followed by the marker string.
  File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd");
  FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult);
  ShellDecryptionKeyProvider keyProvider = new ShellDecryptionKeyProvider();
  Configuration configuration = new Configuration();
  String account = "testacct";
  String key = "key1";
  configuration.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  configuration.set(ShellDecryptionKeyProvider.KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT, "cmd /c " + scriptFile.getAbsolutePath());
  String result = keyProvider.getStorageAccountKey(account, configuration);
  assertEquals(key + " " + expectedResult, result);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests that we delete dangling files properly
 */
@Test public void testDelete() throws Exception {
// Simulate a crash mid-upload: data flushed but the stream never closed.
Path danglingFile=new Path("/crashedInTheMiddle");
FSDataOutputStream stream=fs.create(danglingFile);
stream.write(new byte[]{1,2,3});
stream.flush();
FileStatus fileStatus=fs.getFileStatus(danglingFile);
assertNotNull(fileStatus);
// The unfinished upload shows a zero length and leaves one temp blob.
assertEquals(0,fileStatus.getLen());
assertEquals(1,getNumTempBlobs());
runFsck("-delete");
// fsck -delete must clean up both the temp blob and the dangling file.
assertEquals(0,getNumTempBlobs());
assertFalse(fs.exists(danglingFile));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With no key provider configured, the account key is read straight from
 * the configuration.
 */
@Test public void testDefaultKeyProvider() throws Exception {
  Configuration configuration = new Configuration();
  String accountName = "testacct";
  String accountKey = "testkey";
  configuration.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + accountName, accountKey);
  String retrieved = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(accountName, configuration);
  assertEquals(accountKey, retrieved);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With SimpleKeyProvider explicitly configured for the account, the key
 * lookup still resolves to the configured value.
 */
@Test public void testValidKeyProvider() throws Exception {
  Configuration configuration = new Configuration();
  String accountName = "testacct";
  String accountKey = "testkey";
  configuration.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + accountName, accountKey);
  configuration.setClass("fs.azure.account.keyprovider." + accountName, SimpleKeyProvider.class, KeyProvider.class);
  String retrieved = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(accountName, configuration);
  assertEquals(accountKey, retrieved);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Connects with a read-only SAS, uploads a blob out of band, and verifies
 * the file system can see and read it through the SAS credentials.
 */
@Test public void testConnectUsingSASReadonly() throws Exception {
testAccount=AzureBlobStorageTestAccount.create("",EnumSet.of(CreateOptions.UseSas,CreateOptions.CreateContainer,CreateOptions.Readonly));
assumeNotNull(testAccount);
final String blobKey="blobForReadonly";
// Upload the blob directly, bypassing the (read-only) file system.
CloudBlobContainer container=testAccount.getRealContainer();
CloudBlockBlob blob=container.getBlockBlobReference(blobKey);
ByteArrayInputStream inputStream=new ByteArrayInputStream(new byte[]{1,2,3});
blob.upload(inputStream,3);
inputStream.close();
Path filePath=new Path("/" + blobKey);
FileSystem fs=testAccount.getFileSystem();
assertTrue(fs.exists(filePath));
byte[] obtained=new byte[3];
DataInputStream obtainedInputStream=fs.open(filePath);
obtainedInputStream.readFully(obtained);
obtainedInputStream.close();
assertEquals(3,obtained[2]);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests the cases when the URI is specified with no authority, i.e.
 * wasb:///path/to/file.
 */
@Test public void testNoUriAuthority() throws Exception {
String[] wasbAliases=new String[]{"wasb","wasbs"};
// Every combination of default scheme vs. requested scheme should resolve
// to the default FS authority while keeping the requested scheme.
for ( String defaultScheme : wasbAliases) {
for ( String wantedScheme : wasbAliases) {
testAccount=AzureBlobStorageTestAccount.createMock();
Configuration conf=testAccount.getFileSystem().getConf();
String authority=testAccount.getFileSystem().getUri().getAuthority();
URI defaultUri=new URI(defaultScheme,authority,null,null,null);
conf.set("fs.default.name",defaultUri.toString());
URI wantedUri=new URI(wantedScheme + ":///random/path");
NativeAzureFileSystem obtained=(NativeAzureFileSystem)FileSystem.get(wantedUri,conf);
assertNotNull(obtained);
assertEquals(new URI(wantedScheme,authority,null,null,null),obtained.getUri());
Path qualified=obtained.makeQualified(new Path(wantedUri));
assertEquals(new URI(wantedScheme,authority,wantedUri.getPath(),null,null),qualified.toUri());
testAccount.cleanup();
// Clear the FileSystem cache so the next iteration starts fresh.
FileSystem.closeAll();
}
}
// With a non-WASB default FS there is no authority to borrow, so an
// authority-less wasb URI must be rejected.
testAccount=AzureBlobStorageTestAccount.createMock();
Configuration conf=testAccount.getFileSystem().getConf();
conf.set("fs.default.name","file:///");
try {
FileSystem.get(new URI("wasb:///random/path"),conf);
fail("Should've thrown.");
}
catch ( IllegalArgumentException e) {
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that the same path name in two different storage containers
 * refers to independent files.
 */
@Test public void testMultipleContainers() throws Exception {
  AzureBlobStorageTestAccount accountA = AzureBlobStorageTestAccount.create("first");
  AzureBlobStorageTestAccount accountB = AzureBlobStorageTestAccount.create("second");
  assumeNotNull(accountA);
  assumeNotNull(accountB);
  try {
    FileSystem fsA = accountA.getFileSystem();
    FileSystem fsB = accountB.getFileSystem();
    // Identical path in both containers.
    Path sharedName = new Path("/testWasb");
    assertTrue(validateIOStreams(fsA, sharedName));
    assertTrue(validateIOStreams(fsB, sharedName));
    // Writes to one container must not be visible through the other.
    writeSingleByte(fsA, sharedName, 5);
    writeSingleByte(fsB, sharedName, 7);
    assertSingleByteValue(fsA, sharedName, 5);
    assertSingleByteValue(fsB, sharedName, 7);
  } finally {
    accountA.cleanup();
    accountB.cleanup();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies the web-response counts and the directories-created counter
// emitted by mkdirs() and listStatus().
@Test public void testMetricsOnMkdirList() throws Exception {
long base=getBaseWebResponses();
assertTrue(fs.mkdirs(new Path("a")));
// Creating a directory should cost between 1 and 12 web requests.
base=assertWebResponsesInRange(base,1,12);
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_DIRECTORIES_CREATED));
assertEquals(1,fs.listStatus(new Path("/")).length);
// Listing the root should take exactly one web request.
base=assertWebResponsesEquals(base,1);
assertNoErrors();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies web-response counts and the files-created counter around
// createNewFile() and rename() of a single file.
@Test public void testMetricsOnFileRename() throws Exception {
long base=getBaseWebResponses();
Path originalPath=new Path("/metricsTest_RenameStart");
Path destinationPath=new Path("/metricsTest_RenameFinal");
// No files created yet.
assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_CREATED));
assertTrue(fs.createNewFile(originalPath));
logOpResponseCount("Creating an empty file",base);
// Creating an empty file should cost between 2 and 20 web requests.
base=assertWebResponsesInRange(base,2,20);
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_CREATED));
assertTrue(fs.rename(originalPath,destinationPath));
logOpResponseCount("Renaming a file",base);
// Renaming should cost between 2 and 15 web requests.
base=assertWebResponsesInRange(base,2,15);
assertNoErrors();
}
InternalCallVerifier BooleanVerifier
// Verifies the web-response cost of renaming a non-empty directory.
@Test public void testMetricsOnDirRename() throws Exception {
long base=getBaseWebResponses();
Path originalDirName=new Path("/metricsTestDirectory_RenameStart");
Path innerFileName=new Path(originalDirName,"innerFile");
Path destDirName=new Path("/metricsTestDirectory_RenameFinal");
assertTrue(fs.mkdirs(originalDirName));
// Reset the baseline so setup operations are excluded from the count.
base=getCurrentWebResponses();
assertTrue(fs.createNewFile(innerFileName));
base=getCurrentWebResponses();
assertTrue(fs.rename(originalDirName,destDirName));
logOpResponseCount("Renaming a directory",base);
// Only the rename itself should fall in this range.
base=assertWebResponsesInRange(base,1,20);
assertNoErrors();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies web-response counts and the files-deleted counter across
// exists()/createNewFile()/delete() of one file.
@Test public void testMetricsOnFileExistsDelete() throws Exception {
long base=getBaseWebResponses();
Path filePath=new Path("/metricsTest_delete");
assertFalse(fs.exists(filePath));
logOpResponseCount("Checking file existence for non-existent file",base);
// A negative existence check costs 1-3 web requests.
base=assertWebResponsesInRange(base,1,3);
assertTrue(fs.createNewFile(filePath));
// Reset the baseline so file creation is excluded from the count.
base=getCurrentWebResponses();
assertTrue(fs.exists(filePath));
logOpResponseCount("Checking file existence for existent file",base);
// A positive existence check costs 1-2 web requests.
base=assertWebResponsesInRange(base,1,2);
assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED));
assertTrue(fs.delete(filePath,false));
logOpResponseCount("Deleting a file",base);
// Deletion costs 1-4 web requests and bumps the deleted counter.
base=assertWebResponsesInRange(base,1,4);
assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED));
assertNoErrors();
}
InternalCallVerifier EqualityVerifier
/**
 * Tests that when we have multiple file systems created/destroyed
 * metrics from each are published correctly.
 * @throws Exception
 */
@Test public void testMetricsAcrossFileSystems() throws Exception {
AzureBlobStorageTestAccount a1, a2, a3;
a1=AzureBlobStorageTestAccount.createMock();
assertEquals(0,getFilesCreated(a1));
a2=AzureBlobStorageTestAccount.createMock();
assertEquals(0,getFilesCreated(a2));
// Two files on the first account, one on the second.
a1.getFileSystem().create(new Path("/foo")).close();
a1.getFileSystem().create(new Path("/bar")).close();
a2.getFileSystem().create(new Path("/baz")).close();
// Counters stay at zero until the file system is closed, which is when
// the metrics are published.
assertEquals(0,getFilesCreated(a1));
assertEquals(0,getFilesCreated(a2));
a1.closeFileSystem();
a2.closeFileSystem();
// After close, each account reports only its own creations.
assertEquals(2,getFilesCreated(a1));
assertEquals(1,getFilesCreated(a2));
// A fresh account with no activity publishes zero on close.
a3=AzureBlobStorageTestAccount.createMock();
assertEquals(0,getFilesCreated(a3));
a3.closeFileSystem();
assertEquals(0,getFilesCreated(a3));
}
InternalCallVerifier EqualityVerifier
/**
 * Tests the basic functionality of the class: points are added to a
 * 100 ms rolling window with real sleeps in between, verifying that old
 * points age out of the reported average.
 * NOTE(review): depends on wall-clock sleeps, so it may be flaky on a
 * heavily loaded machine.
 */
@Test public void testBasicFunctionality() throws Exception {
RollingWindowAverage average=new RollingWindowAverage(100);
assertEquals(0,average.getCurrentAverage());
average.addPoint(5);
assertEquals(5,average.getCurrentAverage());
Thread.sleep(50);
average.addPoint(15);
// Both points still inside the window: (5 + 15) / 2.
assertEquals(10,average.getCurrentAverage());
Thread.sleep(60);
// The first point (age > 100 ms) has dropped out.
assertEquals(15,average.getCurrentAverage());
Thread.sleep(50);
// All points have expired.
assertEquals(0,average.getCurrentAverage());
}
UtilityVerifier InternalCallVerifier
@Test public void testNoMkdirOverFile() throws Throwable {
describe("try to mkdir over a file");
FileSystem fs=getFileSystem();
Path path=path("testNoMkdirOverFile");
byte[] dataset=dataset(1024,' ','z');
createFile(getFileSystem(),path,false,dataset);
try {
boolean made=fs.mkdirs(path);
fail("mkdirs did not fail over a file but returned " + made + "; "+ ls(path));
}
catch ( ParentNotDirectoryException e) {
handleExpectedException(e);
}
catch ( FileAlreadyExistsException e) {
handleExpectedException(e);
;
}
catch ( IOException e) {
handleRelaxedException("mkdirs","FileAlreadyExistsException",e);
}
assertIsFile(path);
byte[] bytes=ContractTestUtils.readDataset(getFileSystem(),path,dataset.length);
ContractTestUtils.compareByteArrays(dataset,bytes,dataset.length);
assertPathExists("mkdir failed",path);
assertDeleted(path,true);
}
UtilityVerifier InternalCallVerifier
@Test public void testMkdirOverParentFile() throws Throwable {
describe("try to mkdir where a parent is a file");
FileSystem fs=getFileSystem();
Path path=path("testMkdirOverParentFile");
byte[] dataset=dataset(1024,' ','z');
createFile(getFileSystem(),path,false,dataset);
Path child=new Path(path,"child-to-mkdir");
try {
boolean made=fs.mkdirs(child);
fail("mkdirs did not fail over a file but returned " + made + "; "+ ls(path));
}
catch ( ParentNotDirectoryException e) {
handleExpectedException(e);
}
catch ( FileAlreadyExistsException e) {
handleExpectedException(e);
}
catch ( IOException e) {
handleRelaxedException("mkdirs","ParentNotDirectoryException",e);
}
assertIsFile(path);
byte[] bytes=ContractTestUtils.readDataset(getFileSystem(),path,dataset.length);
ContractTestUtils.compareByteArrays(dataset,bytes,dataset.length);
assertPathExists("mkdir failed",path);
assertDeleted(path,true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testOpenFileTwice() throws Throwable {
describe("verify that two opened file streams are independent");
Path path=path("testopenfiletwice.txt");
byte[] block=dataset(TEST_FILE_LEN,0,255);
createFile(getFileSystem(),path,false,block);
FSDataInputStream instream1=getFileSystem().open(path);
int c=instream1.read();
assertEquals(0,c);
FSDataInputStream instream2=null;
try {
instream2=getFileSystem().open(path);
assertEquals("first read of instream 2",0,instream2.read());
assertEquals("second read of instream 1",1,instream1.read());
instream1.close();
assertEquals("second read of instream 2",1,instream2.read());
instream1.close();
}
finally {
IOUtils.closeStream(instream1);
IOUtils.closeStream(instream2);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSequentialRead() throws Throwable {
describe("verify that sequential read() operations return values");
Path path=path("testsequentialread.txt");
int len=4;
int base=0x40;
byte[] block=dataset(len,base,base + len);
createFile(getFileSystem(),path,false,block);
instream=getFileSystem().open(path);
assertEquals(base,instream.read());
assertEquals(base + 1,instream.read());
assertEquals(base + 2,instream.read());
assertEquals(base + 3,instream.read());
assertEquals(-1,instream.read());
assertEquals(-1,instream.read());
instream.close();
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testRenameNonexistentFile() throws Throwable {
describe("rename a file into a new file in the same directory");
Path missing=path("testRenameNonexistentFileSrc");
Path target=path("testRenameNonexistentFileDest");
boolean renameReturnsFalseOnFailure=isSupported(ContractOptions.RENAME_RETURNS_FALSE_IF_SOURCE_MISSING);
mkdirs(missing.getParent());
try {
boolean renamed=rename(missing,target);
if (!renameReturnsFalseOnFailure) {
String destDirLS=generateAndLogErrorListing(missing,target);
fail("expected rename(" + missing + ", "+ target+ " ) to fail,"+ " got a result of "+ renamed+ " and a destination directory of "+ destDirLS);
}
else {
getLog().warn("Rename returned {} renaming a nonexistent file",renamed);
assertFalse("Renaming a missing file returned true",renamed);
}
}
catch ( FileNotFoundException e) {
if (renameReturnsFalseOnFailure) {
ContractTestUtils.fail("Renaming a missing file unexpectedly threw an exception",e);
}
handleExpectedException(e);
}
catch ( IOException e) {
handleRelaxedException("rename nonexistent file","FileNotFoundException",e);
}
assertPathDoesNotExist("rename nonexistent file created a destination file",target);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Seek round a file bigger than IO buffers
* @throws Throwable
*/
@Test public void testSeekBigFile() throws Throwable {
describe("Seek round a large file and verify the bytes are what is expected");
Path testSeekFile=path("bigseekfile.txt");
byte[] block=dataset(65536,0,255);
createFile(getFileSystem(),testSeekFile,false,block);
instream=getFileSystem().open(testSeekFile);
assertEquals(0,instream.getPos());
instream.seek(0);
int result=instream.read();
assertEquals(0,result);
assertEquals(1,instream.read());
assertEquals(2,instream.read());
instream.seek(32768);
assertEquals("@32768",block[32768],(byte)instream.read());
instream.seek(40000);
assertEquals("@40000",block[40000],(byte)instream.read());
instream.seek(8191);
assertEquals("@8191",block[8191],(byte)instream.read());
instream.seek(0);
assertEquals("@0",0,(byte)instream.read());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSeekAndReadPastEndOfFile() throws Throwable {
describe("verify that reading past the last bytes in the file returns -1");
instream=getFileSystem().open(smallSeekFile);
assertEquals(0,instream.getPos());
instream.seek(TEST_FILE_LEN - 2);
assertTrue("Premature EOF",instream.read() != -1);
assertTrue("Premature EOF",instream.read() != -1);
assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier
/**
* Seek and read on a closed file.
* Some filesystems let callers seek on a closed file -these must
* still fail on the subsequent reads.
* @throws Throwable
*/
@Test public void testSeekReadClosedFile() throws Throwable {
boolean supportsSeekOnClosedFiles=isSupported(SUPPORTS_SEEK_ON_CLOSED_FILE);
instream=getFileSystem().open(smallSeekFile);
getLog().debug("Stream is of type " + instream.getClass().getCanonicalName());
instream.close();
try {
instream.seek(0);
if (!supportsSeekOnClosedFiles) {
fail("seek succeeded on a closed stream");
}
}
catch ( IOException e) {
}
try {
int data=instream.available();
fail("read() succeeded on a closed stream, got " + data);
}
catch ( IOException e) {
}
try {
int data=instream.read();
fail("read() succeeded on a closed stream, got " + data);
}
catch ( IOException e) {
}
try {
byte[] buffer=new byte[1];
int result=instream.read(buffer,0,1);
fail("read(buffer, 0, 1) succeeded on a closed stream, got " + result);
}
catch ( IOException e) {
}
try {
long offset=instream.getPos();
}
catch ( IOException e) {
}
instream.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
describe("verify that a positioned read does not change the getPos() value");
Path testSeekFile=path("bigseekfile.txt");
byte[] block=dataset(65536,0,255);
createFile(getFileSystem(),testSeekFile,false,block);
instream=getFileSystem().open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
assertEquals(40000,instream.getPos());
byte[] readBuffer=new byte[256];
instream.read(128,readBuffer,0,readBuffer.length);
assertEquals(40000,instream.getPos());
assertEquals("@40000",block[40000],(byte)instream.read());
for (int i=0; i < 256; i++) {
assertEquals("@" + i,block[i + 128],readBuffer[i]);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testSeekPastEndOfFileThenReseekAndRead() throws Throwable {
describe("do a seek past the EOF, then verify the stream recovers");
instream=getFileSystem().open(smallSeekFile);
boolean canSeekPastEOF=!getContract().isSupported(ContractOptions.REJECTS_SEEK_PAST_EOF,true);
try {
instream.seek(TEST_FILE_LEN + 1);
assertMinusOne("read after seeking past EOF",instream.read());
}
catch ( EOFException e) {
if (canSeekPastEOF) {
throw e;
}
handleExpectedException(e);
}
catch ( IOException e) {
if (canSeekPastEOF) {
throw e;
}
handleRelaxedException("a seek past the end of the file","EOFException",e);
}
instream.seek(1);
assertTrue("Premature EOF",instream.read() != -1);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSeekFile() throws Throwable {
describe("basic seek operations");
instream=getFileSystem().open(smallSeekFile);
assertEquals(0,instream.getPos());
instream.seek(0);
int result=instream.read();
assertEquals(0,result);
assertEquals(1,instream.read());
assertEquals(2,instream.getPos());
assertEquals(2,instream.read());
assertEquals(3,instream.getPos());
instream.seek(128);
assertEquals(128,instream.getPos());
assertEquals(128,instream.read());
instream.seek(63);
assertEquals(63,instream.read());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testNegativeSeek() throws Throwable {
instream=getFileSystem().open(smallSeekFile);
assertEquals(0,instream.getPos());
try {
instream.seek(-1);
long p=instream.getPos();
LOG.warn("Seek to -1 returned a position of " + p);
int result=instream.read();
fail("expected an exception, got data " + result + " at a position of "+ p);
}
catch ( EOFException e) {
handleExpectedException(e);
}
catch ( IOException e) {
handleRelaxedException("a negative seek","EOFException",e);
}
assertEquals(0,instream.getPos());
}
TestInitializer InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Setup: create the contract then init it
* @throws Exception on any failure
*/
@Before public void setup() throws Exception {
contract=createContract(createConfiguration());
contract.init();
assumeEnabled();
fileSystem=contract.getTestFileSystem();
assertNotNull("null filesystem",fileSystem);
URI fsURI=fileSystem.getUri();
LOG.info("Test filesystem = {} implemented by {}",fsURI,fileSystem);
assertEquals("wrong filesystem of " + fsURI,contract.getScheme(),fsURI.getScheme());
testPath=getContract().getTestPath();
mkdirs(testPath);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @TestDir @TestJetty @TestHdfs public void testDelegationTokenOperations() throws Exception {
createHttpFSServer(true);
URL url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
AuthenticationToken token=new AuthenticationToken("u","p",new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000);
Signer signer=new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned=signer.sign(token.toString());
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY");
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETDELEGATIONTOKEN");
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
JSONObject json=(JSONObject)new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json=(JSONObject)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr=(String)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode());
}
InternalCallVerifier EqualityVerifier
/**
 * A kerberos-authenticated client must be able to access HttpFS.
 */
@Test @TestDir @TestJetty @TestHdfs public void testValidHttpFSAccess() throws Exception {
  createHttpFSServer();
  KerberosTestUtils.doAsClient(new Callable<Void>() {
    @Override public Void call() throws Exception {
      URL url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=GETHOMEDIRECTORY");
      AuthenticatedURL aUrl = new AuthenticatedURL();
      AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
      HttpURLConnection conn = aUrl.openConnection(url, aToken);
      // FIX: JUnit convention is assertEquals(expected, actual); the
      // arguments were previously swapped, producing confusing failure
      // messages. Also parameterized the raw Callable.
      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      return null;
    }
  });
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Full delegation-token lifecycle over HttpFS: obtain a token via a
 * kerberos-authenticated call, use it, renew it (renewal itself requires
 * kerberos), cancel it, and verify the cancelled token is rejected.
 *
 * FIX: every Assert.assertEquals previously passed the actual value as
 * the expected one; arguments are now in (expected, actual) order. The
 * raw Callable is also parameterized as Callable&lt;Void&gt;.
 */
@Test @TestDir @TestJetty @TestHdfs public void testDelegationTokenHttpFSAccess() throws Exception {
  createHttpFSServer();
  KerberosTestUtils.doAsClient(new Callable<Void>() {
    @Override public Void call() throws Exception {
      // Get a delegation token using kerberos credentials.
      URL url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=GETDELEGATIONTOKEN");
      AuthenticatedURL aUrl = new AuthenticatedURL();
      AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
      HttpURLConnection conn = aUrl.openConnection(url, aToken);
      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      JSONObject json = (JSONObject) new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
      json = (JSONObject) json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
      String tokenStr = (String) json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
      // The token alone authenticates a plain (non-kerberos) request.
      url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
      conn = (HttpURLConnection) url.openConnection();
      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      // Renewal without kerberos credentials must be refused...
      url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
      conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
      // ...but succeeds over the authenticated connection.
      url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr);
      conn = aUrl.openConnection(url, aToken);
      conn.setRequestMethod("PUT");
      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      // Cancellation needs no credentials.
      url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr);
      conn = (HttpURLConnection) url.openConnection();
      conn.setRequestMethod("PUT");
      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      // The cancelled token must no longer authenticate.
      url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
      conn = (HttpURLConnection) url.openConnection();
      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
      return null;
    }
  });
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test if the structure generator works fine
*/
@Test public void testStructureGenerator() throws Exception {
StructureGenerator sg=new StructureGenerator();
String[] args=new String[]{"-maxDepth","2","-minWidth","1","-maxWidth","2","-numOfFiles","2","-avgFileSize","1","-outDir",OUT_DIR.getAbsolutePath(),"-seed","1"};
final int MAX_DEPTH=1;
final int MIN_WIDTH=3;
final int MAX_WIDTH=5;
final int NUM_OF_FILES=7;
final int AVG_FILE_SIZE=9;
final int SEED=13;
try {
assertEquals(0,sg.run(args));
BufferedReader in=new BufferedReader(new FileReader(DIR_STRUCTURE_FILE));
assertEquals(DIR_STRUCTURE_FIRST_LINE,in.readLine());
assertEquals(DIR_STRUCTURE_SECOND_LINE,in.readLine());
assertEquals(null,in.readLine());
in.close();
in=new BufferedReader(new FileReader(FILE_STRUCTURE_FILE));
assertEquals(FILE_STRUCTURE_FIRST_LINE,in.readLine());
assertEquals(FILE_STRUCTURE_SECOND_LINE,in.readLine());
assertEquals(null,in.readLine());
in.close();
String oldArg=args[MAX_DEPTH];
args[MAX_DEPTH]="0";
assertEquals(-1,sg.run(args));
args[MAX_DEPTH]=oldArg;
oldArg=args[MIN_WIDTH];
args[MIN_WIDTH]="-1";
assertEquals(-1,sg.run(args));
args[MIN_WIDTH]=oldArg;
oldArg=args[MAX_WIDTH];
args[MAX_WIDTH]="-1";
assertEquals(-1,sg.run(args));
args[MAX_WIDTH]=oldArg;
oldArg=args[NUM_OF_FILES];
args[NUM_OF_FILES]="-1";
assertEquals(-1,sg.run(args));
args[NUM_OF_FILES]=oldArg;
oldArg=args[NUM_OF_FILES];
args[NUM_OF_FILES]="-1";
assertEquals(-1,sg.run(args));
args[NUM_OF_FILES]=oldArg;
oldArg=args[AVG_FILE_SIZE];
args[AVG_FILE_SIZE]="-1";
assertEquals(-1,sg.run(args));
args[AVG_FILE_SIZE]=oldArg;
oldArg=args[SEED];
args[SEED]="34.d4";
assertEquals(-1,sg.run(args));
args[SEED]=oldArg;
}
finally {
DIR_STRUCTURE_FILE.delete();
FILE_STRUCTURE_FILE.delete();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test if the load generator works fine
*/
@Test public void testLoadGenerator() throws Exception {
final String TEST_SPACE_ROOT="/test";
final String SCRIPT_TEST_DIR=OUT_DIR.getAbsolutePath();
String script=SCRIPT_TEST_DIR + "/" + "loadgenscript";
String script2=SCRIPT_TEST_DIR + "/" + "loadgenscript2";
File scriptFile1=new File(script);
File scriptFile2=new File(script2);
FileWriter writer=new FileWriter(DIR_STRUCTURE_FILE);
writer.write(DIR_STRUCTURE_FIRST_LINE + "\n");
writer.write(DIR_STRUCTURE_SECOND_LINE + "\n");
writer.close();
writer=new FileWriter(FILE_STRUCTURE_FILE);
writer.write(FILE_STRUCTURE_FIRST_LINE + "\n");
writer.write(FILE_STRUCTURE_SECOND_LINE + "\n");
writer.close();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
cluster.waitActive();
try {
DataGenerator dg=new DataGenerator();
dg.setConf(CONF);
String[] args=new String[]{"-inDir",OUT_DIR.getAbsolutePath(),"-root",TEST_SPACE_ROOT};
assertEquals(0,dg.run(args));
final int READ_PROBABILITY=1;
final int WRITE_PROBABILITY=3;
final int MAX_DELAY_BETWEEN_OPS=7;
final int NUM_OF_THREADS=9;
final int START_TIME=11;
final int ELAPSED_TIME=13;
LoadGenerator lg=new LoadGenerator();
lg.setConf(CONF);
args=new String[]{"-readProbability","0.3","-writeProbability","0.3","-root",TEST_SPACE_ROOT,"-maxDelayBetweenOps","0","-numOfThreads","1","-startTime",Long.toString(Time.now()),"-elapsedTime","10"};
assertEquals(0,lg.run(args));
String oldArg=args[READ_PROBABILITY];
args[READ_PROBABILITY]="1.1";
assertEquals(-1,lg.run(args));
args[READ_PROBABILITY]="-1.1";
assertEquals(-1,lg.run(args));
args[READ_PROBABILITY]=oldArg;
oldArg=args[WRITE_PROBABILITY];
args[WRITE_PROBABILITY]="1.1";
assertEquals(-1,lg.run(args));
args[WRITE_PROBABILITY]="-1.1";
assertEquals(-1,lg.run(args));
args[WRITE_PROBABILITY]="0.9";
assertEquals(-1,lg.run(args));
args[READ_PROBABILITY]=oldArg;
oldArg=args[MAX_DELAY_BETWEEN_OPS];
args[MAX_DELAY_BETWEEN_OPS]="1.x1";
assertEquals(-1,lg.run(args));
args[MAX_DELAY_BETWEEN_OPS]=oldArg;
oldArg=args[MAX_DELAY_BETWEEN_OPS];
args[MAX_DELAY_BETWEEN_OPS]="1.x1";
assertEquals(-1,lg.run(args));
args[MAX_DELAY_BETWEEN_OPS]=oldArg;
oldArg=args[NUM_OF_THREADS];
args[NUM_OF_THREADS]="-1";
assertEquals(-1,lg.run(args));
args[NUM_OF_THREADS]=oldArg;
oldArg=args[START_TIME];
args[START_TIME]="-1";
assertEquals(-1,lg.run(args));
args[START_TIME]=oldArg;
oldArg=args[ELAPSED_TIME];
args[ELAPSED_TIME]="-1";
assertEquals(-1,lg.run(args));
args[ELAPSED_TIME]=oldArg;
FileWriter fw=new FileWriter(scriptFile1);
fw.write("2 .22 .33\n");
fw.write("3 .10 .6\n");
fw.write("6 0 .7\n");
fw.close();
String[] scriptArgs=new String[]{"-root",TEST_SPACE_ROOT,"-maxDelayBetweenOps","0","-numOfThreads","10","-startTime",Long.toString(Time.now()),"-scriptFile",script};
assertEquals(0,lg.run(scriptArgs));
fw=new FileWriter(scriptFile2);
fw.write("2 .22 .33\n");
fw.write("3 blah blah blah .6\n");
fw.write("6 0 .7\n");
fw.close();
scriptArgs[scriptArgs.length - 1]=script2;
assertEquals(-1,lg.run(scriptArgs));
}
finally {
cluster.shutdown();
DIR_STRUCTURE_FILE.delete();
FILE_STRUCTURE_FILE.delete();
scriptFile1.delete();
scriptFile2.delete();
}
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies the equals() contract of the entry fixtures: distinct objects,
// reflexive and symmetric equality for equal values, inequality for
// different values, and safe behavior against null and foreign types.
@Test public void testEntryEquals(){
// All four fixtures are distinct instances.
assertNotSame(ENTRY1,ENTRY2);
assertNotSame(ENTRY1,ENTRY3);
assertNotSame(ENTRY1,ENTRY4);
assertNotSame(ENTRY2,ENTRY3);
assertNotSame(ENTRY2,ENTRY4);
assertNotSame(ENTRY3,ENTRY4);
// Reflexive.
assertEquals(ENTRY1,ENTRY1);
assertEquals(ENTRY2,ENTRY2);
// ENTRY1 and ENTRY2 are value-equal, symmetrically.
assertEquals(ENTRY1,ENTRY2);
assertEquals(ENTRY2,ENTRY1);
// ENTRY3 and ENTRY4 differ from the others and from each other.
assertFalse(ENTRY1.equals(ENTRY3));
assertFalse(ENTRY1.equals(ENTRY4));
assertFalse(ENTRY3.equals(ENTRY4));
// Never equal to null or to an unrelated type.
assertFalse(ENTRY1.equals(null));
assertFalse(ENTRY1.equals(new Object()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Equal status values must share a hash code; a different value should not.
@Test public void testStatusHashCode(){
assertEquals(STATUS1.hashCode(),STATUS2.hashCode());
assertFalse(STATUS1.hashCode() == STATUS3.hashCode());
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies the equals() contract of the status fixtures: distinct
// instances, reflexive/symmetric equality for STATUS1/STATUS2, inequality
// against STATUS3, null, and foreign types.
@Test public void testStatusEquals(){
assertNotSame(STATUS1,STATUS2);
assertNotSame(STATUS1,STATUS3);
assertNotSame(STATUS2,STATUS3);
// Reflexive.
assertEquals(STATUS1,STATUS1);
assertEquals(STATUS2,STATUS2);
// Symmetric value equality.
assertEquals(STATUS1,STATUS2);
assertEquals(STATUS2,STATUS1);
assertFalse(STATUS1.equals(STATUS3));
assertFalse(STATUS2.equals(STATUS3));
// Never equal to null or to an unrelated type.
assertFalse(STATUS1.equals(null));
assertFalse(STATUS1.equals(new Object()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Equal entries must share a hash code; unequal ones should differ
// (not strictly required by the contract, but true for these fixtures).
@Test public void testEntryHashCode(){
assertEquals(ENTRY1.hashCode(),ENTRY2.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode());
assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode());
assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode());
}
InternalCallVerifier BooleanVerifier
/**
 * Ensure that when we set a sticky bit and shut down the file system, we get
 * the sticky bit back on re-start, and that no extra sticky bits appear after
 * re-start.
 */
@Test public void testStickyBitPersistence() throws Exception {
Path sbSet=new Path("/Housemartins");
Path sbNotSpecified=new Path("/INXS");
Path sbSetOff=new Path("/Easyworld");
for ( Path p : new Path[]{sbSet,sbNotSpecified,sbSetOff}) hdfs.mkdirs(p);
// 01777 sets the sticky bit; 00777 explicitly clears it.
hdfs.setPermission(sbSet,new FsPermission((short)01777));
hdfs.setPermission(sbSetOff,new FsPermission((short)00777));
// Restart the cluster without reformatting, forcing a reload from disk.
shutdown();
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
InternalCallVerifier BooleanVerifier
// Same as testStickyBitPersistence, but with ACLs applied to the
// directories: ACL entries must not disturb sticky-bit persistence
// across a cluster restart.
@Test public void testAclStickyBitPersistence() throws Exception {
Path sbSet=new Path("/Housemartins");
Path sbNotSpecified=new Path("/INXS");
Path sbSetOff=new Path("/Easyworld");
for ( Path p : new Path[]{sbSet,sbNotSpecified,sbSetOff}) hdfs.mkdirs(p);
// 01777 sets the sticky bit; 00777 explicitly clears it.
hdfs.setPermission(sbSet,new FsPermission((short)01777));
applyAcl(sbSet);
hdfs.setPermission(sbSetOff,new FsPermission((short)00777));
applyAcl(sbSetOff);
// Restart the cluster without reformatting, forcing a reload from disk.
shutdown();
initCluster(false);
assertTrue(hdfs.exists(sbSet));
assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbNotSpecified));
assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission().getStickyBit());
assertTrue(hdfs.exists(sbSetOff));
assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
}
InternalCallVerifier EqualityVerifier
// "fs -ls" must still succeed when the target FileSystem's getAclStatus
// RPC is unavailable (simulated via the stubfs.noRpcForGetAclStatus flag).
@Test public void testLsNoRpcForGetAclStatus() throws Exception {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"stubfs:///");
conf.setClass("fs.stubfs.impl",StubFileSystem.class,FileSystem.class);
conf.setBoolean("stubfs.noRpcForGetAclStatus",true);
assertEquals("ls must succeed even if getAclStatus RPC does not exist.",0,ToolRunner.run(conf,new FsShell(),new String[]{"-ls","/"}));
}
InternalCallVerifier EqualityVerifier
// "fs -ls" must still succeed against a FileSystem implementation that
// does not support ACLs at all.
@Test public void testLsAclsUnsupported() throws Exception {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"stubfs:///");
conf.setClass("fs.stubfs.impl",StubFileSystem.class,FileSystem.class);
assertEquals("ls must succeed even if FileSystem does not implement ACLs.",0,ToolRunner.run(conf,new FsShell(),new String[]{"-ls","/"}));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Looking up commands by name: unknown names yield null, registered names
 * yield instances of the right class reporting the requested name, and
 * classes added after bulk registration are resolvable too.
 */
@Test public void testGetInstances(){
  factory.registerCommands(TestRegistrar.class);
  // Unknown command names yield null.
  assertNull(factory.getInstance("blarg"));
  Command cmd = factory.getInstance("tc1");
  assertNotNull(cmd);
  assertEquals(TestCommand1.class, cmd.getClass());
  assertEquals("tc1", cmd.getCommandName());
  cmd = factory.getInstance("tc2");
  assertNotNull(cmd);
  assertEquals(TestCommand2.class, cmd.getClass());
  assertEquals("tc2", cmd.getCommandName());
  // "tc2.1" maps to the same class but reports its own name.
  cmd = factory.getInstance("tc2.1");
  assertNotNull(cmd);
  assertEquals(TestCommand2.class, cmd.getClass());
  assertEquals("tc2.1", cmd.getCommandName());
  // Classes may also be registered individually.
  factory.addClass(TestCommand4.class, "tc4");
  cmd = factory.getInstance("tc4");
  assertNotNull(cmd);
  assertEquals(TestCommand4.class, cmd.getClass());
  assertEquals("tc4", cmd.getCommandName());
  assertEquals("-tc4 tc4_usage", cmd.getUsage());
  assertEquals("tc4_description", cmd.getDescription());
}
InternalCallVerifier EqualityVerifier
/**
 * Registering commands: the factory starts empty, bulk registration adds
 * three names, and individually added classes extend the name list.
 */
@Test public void testRegistration(){
  // Factory starts with no registered commands.
  assertArrayEquals(new String[]{}, factory.getNames());
  factory.registerCommands(TestRegistrar.class);
  assertArrayEquals(new String[]{"tc1","tc2","tc2.1"}, factory.getNames());
  // Classes may be added individually after bulk registration.
  factory.addClass(TestCommand3.class, "tc3");
  assertArrayEquals(new String[]{"tc1","tc2","tc2.1","tc3"}, factory.getNames());
  // The name may also come from the command instance itself.
  factory.addClass(TestCommand4.class, new TestCommand4().getName());
  assertArrayEquals(new String[]{"tc1","tc2","tc2.1","tc3","tc4"}, factory.getNames());
}
InternalCallVerifier EqualityVerifier
/**
 * cp -p must preserve a copied directory's modification time and
 * permissions (d1 is a fixture directory; d3 is the fresh copy).
 */
@Test(timeout=10000) public void testDirectoryCpWithP() throws Exception {
run(new Cp(),"-p","d1","d3");
assertEquals(fs.getFileStatus(new Path("d1")).getModificationTime(),fs.getFileStatus(new Path("d3")).getModificationTime());
assertEquals(fs.getFileStatus(new Path("d1")).getPermission(),fs.getFileStatus(new Path("d3")).getPermission());
}
InternalCallVerifier BooleanVerifier
/**
 * cp without -p must NOT preserve the source directory's modification
 * time or permissions on the copy (d1 is a fixture directory; d4 the
 * fresh copy, which gets current-time/default attributes).
 */
@Test(timeout=10000) public void testDirectoryCpWithoutP() throws Exception {
run(new Cp(),"d1","d4");
// assertFalse with a message instead of assertTrue over a negation
assertFalse("modification time must not be preserved without -p",fs.getFileStatus(new Path("d1")).getModificationTime() == fs.getFileStatus(new Path("d4")).getModificationTime());
assertFalse("permissions must not be preserved without -p",fs.getFileStatus(new Path("d1")).getPermission().equals(fs.getFileStatus(new Path("d4")).getPermission()));
}
InternalCallVerifier EqualityVerifier
/** Count must report "count" as its command name. */
@Test public void getCommandName(){
final Count cmd=new Count();
assertEquals("Count.getCommandName","count",cmd.getCommandName());
}
InternalCallVerifier EqualityVerifier
/** Count is a current (non-deprecated) command. */
@Test public void isDeprecated(){
Count count=new Count();
// assertFalse states the intent directly instead of comparing booleans
assertFalse("Count.isDeprecated",count.isDeprecated());
}
InternalCallVerifier EqualityVerifier
/** The legacy getName() accessor must also report "count". */
@Test public void getName(){
final Count cmd=new Count();
assertEquals("Count.getName","count",cmd.getName());
}
InternalCallVerifier BooleanVerifier
/**
 * Both -q and -h together enable quota reporting and human-readable
 * output; "dummy" stands in for the required path argument.
 */
@Test public void processOptionsAll(){
// typed list instead of the raw LinkedList the original used
LinkedList<String> options=new LinkedList<String>();
options.add("-q");
options.add("-h");
options.add("dummy");
Count count=new Count();
count.processOptions(options);
assertTrue(count.isShowQuotas());
assertTrue(count.isHumanReadable());
}
InternalCallVerifier BooleanVerifier
/** -h alone enables human-readable output but not quota reporting. */
@Test public void processOptionsHumanReadable(){
// typed list instead of the raw LinkedList the original used
LinkedList<String> options=new LinkedList<String>();
options.add("-h");
options.add("dummy");
Count count=new Count();
count.processOptions(options);
assertFalse(count.isShowQuotas());
assertTrue(count.isHumanReadable());
}
InternalCallVerifier EqualityVerifier
/** Usage string advertises the -q and -h options. */
@Test public void getUsage(){
final Count cmd=new Count();
assertEquals("Count.getUsage","-count [-q] [-h] ...",cmd.getUsage());
}
InternalCallVerifier EqualityVerifier
/** Count is not deprecated, so it has no replacement command. */
@Test public void getReplacementCommand(){
Count count=new Count();
// assertNull is clearer than assertEquals against a null "expected"
assertNull("Count.getReplacementCommand",count.getReplacementCommand());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests whether binary Avro data files are displayed correctly:
 * Display.Text must decode each Avro record to one JSON line.
 */
@Test public void testDisplayForAvroFiles() throws Exception {
createAvroFile(generateWeatherAvroBinaryData());
Configuration conf=fs.getConf();
PathData pathData=new PathData(AVRO_FILENAME.toString(),conf);
Display.Text text=new Display.Text();
text.setConf(conf);
// getInputStream is private; reach it reflectively to get the decoded view
Method method=text.getClass().getDeclaredMethod("getInputStream",PathData.class);
method.setAccessible(true);
InputStream stream=(InputStream)method.invoke(text,pathData);
String output=inputStreamToString(stream);
// one JSON record per weather entry, each terminated by the platform EOL
String eol=System.getProperty("line.separator");
String expectedOutput="{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + eol + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" + eol + "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" + eol + "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" + eol + "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" + eol;
assertEquals(expectedOutput,output);
}
InternalCallVerifier BooleanVerifier
/**
 * Moving a file onto a directory that already contains an entry with
 * the same name must fail with PathExistsException, even with the
 * overwrite flag set: the implicit target mockfs:/fold0/file exists.
 * Every filesystem interaction is mocked; only the rename command's
 * path-resolution logic is under test.
 */
@Test public void testMoveTargetExistsWithoutExplicitRename() throws Exception {
Path srcPath=new Path("mockfs:/file");
Path targetPath=new Path("mockfs:/fold0");
Path dupPath=new Path("mockfs:/fold0/file");
// qualified variants returned by the mocked FileStatus.getPath()
Path srcPath2=new Path("mockfs://user/file");
Path targetPath2=new Path("mockfs://user/fold0");
Path dupPath2=new Path("mockfs://user/fold0/file");
InstrumentedRenameCommand cmd;
String[] cmdargs=new String[]{"mockfs:/file","mockfs:/fold0"};
FileStatus src_fileStat, target_fileStat, dup_fileStat;
URI myuri;
src_fileStat=mock(FileStatus.class);
target_fileStat=mock(FileStatus.class);
dup_fileStat=mock(FileStatus.class);
myuri=new URI("mockfs://user");
// src and the duplicate are files; the target is a directory
when(src_fileStat.isDirectory()).thenReturn(false);
when(target_fileStat.isDirectory()).thenReturn(true);
when(dup_fileStat.isDirectory()).thenReturn(false);
when(src_fileStat.getPath()).thenReturn(srcPath2);
when(target_fileStat.getPath()).thenReturn(targetPath2);
when(dup_fileStat.getPath()).thenReturn(dupPath2);
// every status lookup (short or qualified form) resolves to a mock
when(mockFs.getFileStatus(eq(srcPath))).thenReturn(src_fileStat);
when(mockFs.getFileStatus(eq(targetPath))).thenReturn(target_fileStat);
when(mockFs.getFileStatus(eq(dupPath))).thenReturn(dup_fileStat);
when(mockFs.getFileStatus(eq(srcPath2))).thenReturn(src_fileStat);
when(mockFs.getFileStatus(eq(targetPath2))).thenReturn(target_fileStat);
when(mockFs.getFileStatus(eq(dupPath2))).thenReturn(dup_fileStat);
when(mockFs.getUri()).thenReturn(myuri);
cmd=new InstrumentedRenameCommand();
cmd.setConf(conf);
// even with overwrite the pre-existing implicit target must be rejected
cmd.setOverwrite(true);
cmd.run(cmdargs);
assertTrue("Rename should have failed with path exists exception",cmd.error instanceof PathExistsException);
}
InternalCallVerifier EqualityVerifier
/** Listing "." (the working directory) must show the d1/d2 fixture dirs. */
@Test(timeout=30000) public void testCwdContents() throws Exception {
String dirString=Path.CUR_DIR;
PathData item=new PathData(dirString,conf);
PathData[] items=item.getDirectoryContents();
// sortedString normalizes ordering so the comparison is order-independent
assertEquals(sortedString("d1","d2"),sortedString(items));
}
InternalCallVerifier EqualityVerifier
/**
 * Listing via an unqualified path ("d1") must return the children with
 * the same unqualified form, not fully-qualified URIs.
 */
@Test(timeout=30000) public void testUnqualifiedUriContents() throws Exception {
String dirString="d1";
PathData item=new PathData(dirString,conf);
PathData[] items=item.getDirectoryContents();
assertEquals(sortedString("d1/f1","d1/f1.1","d1/f2"),sortedString(items));
}
InternalCallVerifier EqualityVerifier
/**
 * Listing via a fully-qualified URI must return children that carry the
 * same qualified prefix as the parent.
 */
@Test(timeout=30000) public void testQualifiedUriContents() throws Exception {
String dirString=fs.makeQualified(new Path("d1")).toString();
PathData item=new PathData(dirString,conf);
PathData[] items=item.getDirectoryContents();
assertEquals(sortedString(dirString + "/f1",dirString + "/f1.1",dirString + "/f2"),sortedString(items));
}
InternalCallVerifier EqualityVerifier
/**
 * PathData.toFile() must map relative, working-dir-relative and
 * absolute path strings onto the same local java.io.File locations.
 */
@Test(timeout=30000) public void testToFile() throws Exception {
PathData item=new PathData(".",conf);
assertEquals(new File(testDir.toString()),item.toFile());
item=new PathData("d1/f1",conf);
assertEquals(new File(testDir + "/d1/f1"),item.toFile());
// the absolute spelling must resolve to the same file as the relative one
item=new PathData(testDir + "/d1/f1",conf);
assertEquals(new File(testDir + "/d1/f1"),item.toFile());
}
InternalCallVerifier BooleanVerifier
/**
 * A RemoteException carrying the PathIOException class name must unwrap
 * to a PathIOException via both the no-argument and the typed
 * unwrapRemoteException overloads.
 * (The original test also built two PathIOException locals that were
 * never read; those dead assignments are removed.)
 */
@Test public void testRemoteExceptionUnwrap() throws Exception {
RemoteException re;
IOException ie;
// remote exception mirroring the single-argument constructor case
re=new RemoteException(PathIOException.class.getName(),"test constructor1");
ie=re.unwrapRemoteException();
assertTrue(ie instanceof PathIOException);
ie=re.unwrapRemoteException(PathIOException.class);
assertTrue(ie instanceof PathIOException);
// same checks for the two-argument (path + message) constructor case
re=new RemoteException(PathIOException.class.getName(),"test constructor2");
ie=re.unwrapRemoteException();
assertTrue(ie instanceof PathIOException);
ie=re.unwrapRemoteException(PathIOException.class);
assertTrue(ie instanceof PathIOException);
}
InternalCallVerifier EqualityVerifier
/**
 * PathIOException built with a cause: getPath() echoes the path, and the
 * message is the quoted path, "Input/output error", then the cause text.
 * NOTE(review): the assertion appends the shared "error" fixture field —
 * presumably equal to the cause message "KABOOM"; confirm against the
 * field definition.
 */
@Test public void testWithThrowable() throws Exception {
IOException ioe=new IOException("KABOOM");
PathIOException pe=new PathIOException(path,ioe);
assertEquals(new Path(path),pe.getPath());
assertEquals("`" + path + "': Input/output error: "+ error,pe.getMessage());
}
InternalCallVerifier EqualityVerifier
/**
 * PathIOException built from a path only: the message is the quoted
 * path followed by the default "Input/output error" text.
 */
@Test public void testWithDefaultString() throws Exception {
PathIOException pe=new PathIOException(path);
assertEquals(new Path(path),pe.getPath());
assertEquals("`" + path + "': Input/output error",pe.getMessage());
}
InternalCallVerifier EqualityVerifier
/**
 * PathIOException built with a custom error string: that string replaces
 * the default "Input/output error" text in the message.
 */
@Test public void testWithCustomString() throws Exception {
PathIOException pe=new PathIOException(path,error);
assertEquals(new Path(path),pe.getPath());
assertEquals("`" + path + "': "+ error,pe.getMessage());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Tests whether binary Avro data files are displayed correctly.
*/
@Test(timeout=30000) public void testDisplayForAvroFiles() throws Exception {
createAvroFile(generateWeatherAvroBinaryData());
Configuration conf=new Configuration();
URI localPath=new URI(AVRO_FILENAME);
PathData pathData=new PathData(localPath,conf);
Display.Text text=new Display.Text();
text.setConf(conf);
Method method=text.getClass().getDeclaredMethod("getInputStream",PathData.class);
method.setAccessible(true);
InputStream stream=(InputStream)method.invoke(text,pathData);
String output=inputStreamToString(stream);
String expectedOutput="{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + System.getProperty("line.separator") + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}"+ System.getProperty("line.separator")+ "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}"+ System.getProperty("line.separator")+ "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}"+ System.getProperty("line.separator")+ "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}"+ System.getProperty("line.separator");
assertEquals(expectedOutput,output);
}
InternalCallVerifier EqualityVerifier
/**
 * ConfigExtractor must report the option values encoded by
 * getTestConfig(true): operation count, map/reduce task counts, the
 * 1-2 MB append/write/block size ranges, the result file path and the
 * 10 second duration. All assertEquals calls put the expected value
 * first (the original had them reversed).
 */
@Test public void testArguments() throws Exception {
ConfigExtractor extractor=getTestConfig(true);
assertEquals(Constants.OperationType.values().length,extractor.getOpCount().intValue());
assertEquals(2,extractor.getMapAmount().intValue());
assertEquals(2,extractor.getReducerAmount().intValue());
Range apRange=extractor.getAppendSize();
assertEquals(Constants.MEGABYTES * 1,apRange.getLower().intValue());
assertEquals(Constants.MEGABYTES * 2,apRange.getUpper().intValue());
Range wRange=extractor.getWriteSize();
assertEquals(Constants.MEGABYTES * 1,wRange.getLower().intValue());
assertEquals(Constants.MEGABYTES * 2,wRange.getUpper().intValue());
Range bRange=extractor.getBlockSize();
assertEquals(Constants.MEGABYTES * 1,bRange.getLower().intValue());
assertEquals(Constants.MEGABYTES * 2,bRange.getUpper().intValue());
String resfile=extractor.getResultFile();
assertEquals(getResultFile().toString(),resfile);
int durationMs=extractor.getDurationMilliseconds();
assertEquals(10 * 1000,durationMs);
}
InternalCallVerifier BooleanVerifier
/**
 * Feeds 10000 random bytes (not a valid slive data file) to
 * DataVerifier and checks that no chunk verifies as valid.
 */
@Test public void testBadChunks() throws Exception {
File fn=getTestFile();
int byteAm=10000;
FileOutputStream fout=new FileOutputStream(fn);
byte[] bytes=new byte[byteAm];
rnd.nextBytes(bytes);
fout.write(bytes);
fout.close();
DataVerifier vf=new DataVerifier();
// zeroed default stands if verifyFile throws before producing output
VerifyOutput vout=new VerifyOutput(0,0,0,0);
DataInputStream in=null;
try {
in=new DataInputStream(new FileInputStream(fn));
vout=vf.verifyFile(byteAm,in);
}
catch ( Exception e) {
// expected: random bytes may not even parse; the default vout stands
}
finally {
if (in != null) in.close();
}
// assertEquals replaces the weaker assertTrue(x == 0) pattern
assertEquals("no chunk of random data should verify as valid",0,vout.getChunksSame());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips byteAm bytes through DataWriter and checks DataVerifier
 * reads the same count back with no differing chunks.
 */
@Test public void testDataWriting() throws Exception {
long byteAm=100;
File fn=getTestFile();
DataWriter writer=new DataWriter(rnd);
FileOutputStream fs=new FileOutputStream(fn);
GenerateOutput ostat=writer.writeSegment(byteAm,fs);
LOG.info(ostat);
fs.close();
// assertEquals (expected first) replaces assertTrue(x == y)
assertEquals(byteAm,ostat.getBytesWritten());
DataVerifier vf=new DataVerifier();
FileInputStream fin=new FileInputStream(fn);
VerifyOutput vfout=vf.verifyFile(byteAm,new DataInputStream(fin));
LOG.info(vfout);
fin.close();
assertEquals(byteAm,vfout.getBytesRead());
assertEquals(0,vfout.getChunksDifferent());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * End-to-end MapReduce run of SliveTest: the tool must exit 0 and leave
 * the configured result file on local disk.
 */
@Test public void testMRFlow() throws Exception {
ConfigExtractor extractor=getTestConfig(false);
SliveTest s=new SliveTest(getBaseConfig());
int ec=ToolRunner.run(s,getTestArgs(false));
// assertEquals with a message instead of assertTrue(ec == 0)
assertEquals("SliveTest should exit cleanly",0,ec);
String resFile=extractor.getResultFile();
File fn=new File(resFile);
assertTrue("result file " + resFile + " should exist",fn.exists());
}
InternalCallVerifier EqualityVerifier
/** Range is a simple value holder: lower/upper come back unchanged. */
@Test public void testRange(){
Range r=new Range(10L,20L);
// expected value first, per JUnit convention (original had it reversed)
assertEquals(10L,r.getLower().longValue());
assertEquals(20L,r.getUpper().longValue());
}
InternalCallVerifier BooleanVerifier
/**
 * RouletteSelector: an empty weight list selects nothing; with a heavy
 * (1.0) and a near-zero (0.01) weight, the heavy operation is the one
 * selected (same behavior the original asserted via op == cop).
 */
@Test public void testSelector() throws Exception {
ConfigExtractor extractor=getTestConfig(false);
RouletteSelector selector=new RouletteSelector(rnd);
// typed list instead of the raw List/LinkedList the original used
List<OperationWeight> sList=new LinkedList<OperationWeight>();
Operation op=selector.select(sList);
assertNull("empty weight list should select nothing",op);
CreateOp cop=new CreateOp(extractor,rnd);
sList.add(new OperationWeight(cop,1.0d));
AppendOp aop=new AppendOp(extractor,rnd);
sList.add(new OperationWeight(aop,0.01d));
op=selector.select(sList);
// assertSame expresses the reference-identity check explicitly
assertSame("the dominant weight should be selected",cop,op);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Seek past the buffer then read: position on the last two bytes of the
 * file, read them both, then verify the next read reports EOF (-1).
 * @throws Throwable problems
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
instream=fs.open(readFile);
assertEquals(0,instream.getPos());
// position on the last two bytes of the file
instream.seek(SEEK_FILE_LEN - 2);
assertTrue("Premature EOF",instream.read() != -1);
assertTrue("Premature EOF",instream.read() != -1);
// now at EOF: a further read must return -1
assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Seek past the buffer and attempt a read(buffer): after consuming the
 * last byte, bulk reads must return -1, while a zero-length read at EOF
 * must return 0 (range check before EOF check).
 * @throws Throwable failures
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBulkReadPastEndOfFile() throws Throwable {
instream=fs.open(readFile);
assertEquals(0,instream.getPos());
// last byte of the file; the first bulk read consumes it
instream.seek(SEEK_FILE_LEN - 1);
byte[] buffer=new byte[1];
int result=instream.read(buffer,0,1);
// subsequent bulk reads are past EOF and must return -1
result=instream.read(buffer,0,1);
assertMinusOne("read past end of file",result);
result=instream.read(buffer,0,1);
assertMinusOne("read past end of file",result);
// zero-length read at EOF: result is 0, not -1
result=instream.read(buffer,0,0);
assertEquals("EOF checks coming before read range check",0,result);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Seeking exactly to EOF must either yield -1 on the next read or throw
 * EOFException (both accepted); afterwards the stream must still be
 * usable from an in-range position.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndPastEndOfFileThenReseekAndRead() throws Throwable {
instream=fs.open(smallSeekFile);
try {
instream.seek(SMALL_SEEK_FILE_LEN);
assertMinusOne("read after seeking past EOF",instream.read());
}
catch ( EOFException expected) {
// some implementations reject the EOF seek itself; that is fine too
}
instream.seek(1);
assertTrue("Premature EOF",instream.read() != -1);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A positioned read (read(position, buf, off, len)) must not move the
 * stream's current position and must return the data at the requested
 * offset.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
Path testSeekFile=new Path(testPath,"bigseekfile.txt");
byte[] block=SwiftTestUtils.dataset(65536,0,255);
createFile(testSeekFile,block);
instream=fs.open(testSeekFile);
instream.seek(39999);
assertTrue(-1 != instream.read());
assertEquals(40000,instream.getPos());
byte[] readBuffer=new byte[256];
// positioned read at offset 128; current position must stay at 40000
instream.read(128,readBuffer,0,readBuffer.length);
assertEquals(40000,instream.getPos());
assertEquals("@40000",block[40000],(byte)instream.read());
// the buffer must hold bytes 128..383 of the dataset
for (int i=0; i < 256; i++) {
assertEquals("@" + i,block[i + 128],readBuffer[i]);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Small-file variant of the seek-to-near-EOF test: read the last two
 * bytes, then verify the next read reports EOF (-1).
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable {
instream=fs.open(smallSeekFile);
assertEquals(0,instream.getPos());
// position on the last two bytes of the file
instream.seek(SMALL_SEEK_FILE_LEN - 2);
assertTrue("Premature EOF",instream.read() != -1);
assertTrue("Premature EOF",instream.read() != -1);
assertMinusOne("read past end of file",instream.read());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * seek(-1) must raise an IOException and leave the position unchanged;
 * if it does not throw, the test logs the bogus position and fails.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNegativeSeek() throws Throwable {
instream=fs.open(smallSeekFile);
assertEquals(0,instream.getPos());
try {
instream.seek(-1);
// should be unreachable: gather diagnostics before failing
long p=instream.getPos();
LOG.warn("Seek to -1 returned a position of " + p);
int result=instream.read();
fail("expected an exception, got data " + result + " at a position of "+ p);
}
catch ( IOException e) {
// expected outcome for a negative seek
}
assertEquals(0,instream.getPos());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Seeks around a 64KB generated dataset: forward, backward and rewind
 * seeks must each land on the byte the dataset holds at that offset.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBigFile() throws Throwable {
Path testSeekFile=new Path(testPath,"bigseekfile.txt");
byte[] block=SwiftTestUtils.dataset(65536,0,255);
createFile(testSeekFile,block);
instream=fs.open(testSeekFile);
assertEquals(0,instream.getPos());
instream.seek(0);
int result=instream.read();
assertEquals(0,result);
assertEquals(1,instream.read());
assertEquals(2,instream.read());
// forward seek beyond any read-ahead buffer
instream.seek(32768);
assertEquals("@32768",block[32768],(byte)instream.read());
instream.seek(40000);
assertEquals("@40000",block[40000],(byte)instream.read());
// backward seek
instream.seek(8191);
assertEquals("@8191",block[8191],(byte)instream.read());
// rewind to the start
instream.seek(0);
assertEquals("@0",0,(byte)instream.read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Basic seek semantics on the small seek file: sequential reads advance
 * getPos() by one, and forward/backward seeks position the next read on
 * the byte matching the offset.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekFile() throws Throwable {
instream=fs.open(smallSeekFile);
assertEquals(0,instream.getPos());
instream.seek(0);
int result=instream.read();
assertEquals(0,result);
assertEquals(1,instream.read());
// each read moves the position forward by exactly one byte
assertEquals(2,instream.getPos());
assertEquals(2,instream.read());
assertEquals(3,instream.getPos());
instream.seek(128);
assertEquals(128,instream.getPos());
assertEquals(128,instream.read());
// backward seek
instream.seek(63);
assertEquals(63,instream.read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Proxy host/port set in the configuration must surface on the client. */
@Test public void testProxyData() throws Exception {
final Configuration conf=createCoreConfig();
final String proxyHost="web-proxy";
final int proxyPort=8088;
conf.set(SWIFT_PROXY_HOST_PROPERTY,proxyHost);
conf.set(SWIFT_PROXY_PORT_PROPERTY,Integer.toString(proxyPort));
final SwiftRestClient client=mkInstance(conf);
assertEquals(proxyHost,client.getProxyHost());
assertEquals(proxyPort,client.getProxyPort());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** A positive partition-size option must be read back unchanged (KB). */
@Test public void testPositivePartsize() throws Exception {
final Configuration conf=createCoreConfig();
final int partSizeKB=127;
conf.set(SWIFT_PARTITION_SIZE,Integer.toString(partSizeKB));
final SwiftRestClient client=mkInstance(conf);
assertEquals(partSizeKB,client.getPartSizeKB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** A positive blocksize option must be read back unchanged (KB). */
@Test public void testPositiveBlocksize() throws Exception {
final Configuration conf=createCoreConfig();
final int blocksizeKB=127;
conf.set(SWIFT_BLOCKSIZE,Integer.toString(blocksizeKB));
final SwiftRestClient client=mkInstance(conf);
assertEquals(blocksizeKB,client.getBlocksizeKB());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A stored file must report non-zero block size and replication in its
 * FileStatus.
 * NOTE(review): the file is written via writeTextFile and then
 * createFile() is invoked on the same path — presumably an intentional
 * overwrite by the fixture helper; confirm the duplication is wanted.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testBlocksizeNonZeroForFile() throws Throwable {
Path smallfile=new Path("/test/smallfile");
SwiftTestUtils.writeTextFile(fs,smallfile,"blocksize",true);
createFile(smallfile);
FileStatus status=getFs().getFileStatus(smallfile);
assertTrue("Zero blocksize in " + status,status.getBlockSize() != 0L);
assertTrue("Zero replication in " + status,status.getReplication() != 0L);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* test that a dir off root has a listStatus() call that
* works as expected. and that when a child is added. it changes
* @throws Exception on failures
*/
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception {
Path test=path("/test");
fs.delete(test,true);
mkdirs(test);
assertExists("created test directory",test);
FileStatus[] statuses=fs.listStatus(test);
String statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,0,statuses.length);
Path src=path("/test/file");
SwiftTestUtils.touch(fs,src);
statuses=fs.listStatus(test);
statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,1,statuses.length);
SwiftFileStatus stat=(SwiftFileStatus)statuses[0];
assertTrue("isDir(): Not a directory: " + stat,stat.isDir());
extraStatusAssertions(stat);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* test that a dir two levels down has a listStatus() call that
* works as expected.
* @throws Exception on failures
*/
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesLowerDownHaveMatchingFileStatus() throws Exception {
Path test=path("/test/testDirectoriesLowerDownHaveMatchingFileStatus");
fs.delete(test,true);
mkdirs(test);
assertExists("created test sub directory",test);
FileStatus[] statuses=fs.listStatus(test);
String statusString=statusToString(test.toString(),statuses);
assertEquals("Wrong number of elements in file status " + statusString,0,statuses.length);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Writes a short string to a new file, reads it back and checks the
 * round trip; the file is deleted and the stream closed in finally so
 * later tests start clean.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testWriteReadFile() throws Exception {
final Path f=new Path("/test/test");
final FSDataOutputStream fsDataOutputStream=fs.create(f);
final String message="Test string";
fsDataOutputStream.write(message.getBytes());
fsDataOutputStream.close();
assertExists("created file",f);
FSDataInputStream open=null;
try {
open=fs.open(f);
final byte[] bytes=new byte[512];
final int read=open.read(bytes);
// trim the oversized read buffer down to the bytes actually read
final byte[] buffer=new byte[read];
System.arraycopy(bytes,0,buffer,0,read);
assertEquals(message,new String(buffer));
}
finally {
fs.delete(f,false);
IOUtils.closeStream(open);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * tests functionality for big files ( > 5Gb) upload
 * NOTE(review): only 8KB is actually written here — partitioning is
 * exercised by a small PART_SIZE_BYTES rather than a huge file; the
 * javadoc's 5Gb claim describes the production scenario being modeled.
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testFilePartUpload() throws Throwable {
final Path path=new Path("/test/testFilePartUpload");
int len=8192;
final byte[] src=SwiftTestUtils.dataset(len,32,144);
FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
try {
int totalPartitionsToWrite=len / PART_SIZE_BYTES;
assertPartitionsWritten("Startup",out,0);
// write a first slice and check the predicted partition count
int firstWriteLen=2048;
out.write(src,0,firstWriteLen);
long expected=getExpectedPartitionsWritten(firstWriteLen,PART_SIZE_BYTES,false);
SwiftUtils.debug(LOG,"First write: predict %d partitions written",expected);
assertPartitionsWritten("First write completed",out,expected);
// write the remainder of the dataset
int remainder=len - firstWriteLen;
SwiftUtils.debug(LOG,"remainder: writing: %d bytes",remainder);
out.write(src,firstWriteLen,remainder);
expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,false);
assertPartitionsWritten("Remaining data",out,expected);
// close flushes the final partial partition (closed=true prediction)
out.close();
expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
assertPartitionsWritten("Stream closed",out,expected);
Header[] headers=fs.getStore().getObjectHeaders(path,true);
for ( Header header : headers) {
LOG.info(header.toString());
}
// the reassembled object must match the source dataset byte for byte
byte[] dest=readDataset(fs,path,len);
LOG.info("Read dataset from " + path + ": data length ="+ len);
SwiftTestUtils.compareByteArrays(src,dest,len);
FileStatus status;
final Path qualifiedPath=path.makeQualified(fs);
status=fs.getFileStatus(qualifiedPath);
BlockLocation[] locations=fs.getFileBlockLocations(status,0,len);
assertNotNull("Null getFileBlockLocations()",locations);
assertTrue("empty array returned for getFileBlockLocations()",locations.length > 0);
// path length may legitimately disagree on some stores: treat a
// mismatch as an assumption violation, not a failure
try {
validatePathLen(path,len);
}
catch ( AssertionError e) {
throw new AssumptionViolatedException(e,null);
}
}
finally {
IOUtils.closeStream(out);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test sticks up a very large partitioned file and verifies that
* it comes back unchanged.
* @throws Throwable
*/
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testManyPartitionedFile() throws Throwable {
final Path path=new Path("/test/testManyPartitionedFile");
int len=PART_SIZE_BYTES * 15;
final byte[] src=SwiftTestUtils.dataset(len,32,144);
FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
out.write(src,0,src.length);
int expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
out.close();
assertPartitionsWritten("write completed",out,expected);
assertEquals("too few bytes written",len,SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded",len,SwiftNativeFileSystem.getBytesUploaded(out));
byte[] dest=readDataset(fs,path,len);
SwiftTestUtils.compareByteArrays(src,dest,len);
FileStatus[] stats=fs.listStatus(path);
assertEquals("wrong entry count in " + SwiftTestUtils.dumpStats(path.toString(),stats),expected,stats.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renames a file into another directory and verifies the content is
 * readable, intact, at the new location. Skipped (assume) when the
 * store does not support rename.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
assumeRenameSupported();
final Path old=new Path("/test/alice/file");
final Path newPath=new Path("/test/bob/file");
fs.mkdirs(newPath.getParent());
final FSDataOutputStream fsDataOutputStream=fs.create(old);
final byte[] message="Some data".getBytes();
fsDataOutputStream.write(message);
fsDataOutputStream.close();
assertTrue(fs.exists(old));
// rename(src, dst, renameMustSucceed, srcExists, dstExists)
rename(old,newPath,true,false,true);
final FSDataInputStream bobStream=fs.open(newPath);
final byte[] bytes=new byte[512];
final int read=bobStream.read(bytes);
bobStream.close();
// trim the oversized read buffer down to the bytes actually read
final byte[] buffer=new byte[read];
System.arraycopy(bytes,0,buffer,0,read);
assertEquals(new String(message),new String(buffer));
}
InternalCallVerifier EqualityVerifier
/**
 * resolvePath must map chroot-relative paths onto the underlying
 * filesystem: "/" resolves to the chroot base and "/foo" to base/foo.
 */
@Test public void testResolvePath() throws IOException {
Assert.assertEquals(chrootedTo,fSys.resolvePath(new Path("/")));
fileSystemTestHelper.createFile(fSys,"/foo");
Assert.assertEquals(new Path(chrootedTo,"foo"),fSys.resolvePath(new Path("/foo")));
}
InternalCallVerifier BooleanVerifier
/**
 * mkdir/delete through the chrooted view must be observable both via
 * the chrooted fs (fSys) and via the underlying target fs (fSysTarget)
 * under the chroot base.
 */
@Test public void testMkdirDelete() throws IOException {
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirX"));
Assert.assertTrue(fSys.isDirectory(new Path("/dirX")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX")));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirX/dirY"));
Assert.assertTrue(fSys.isDirectory(new Path("/dirX/dirY")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"dirX/dirY")));
// delete the leaf first, then its parent; both views must agree
Assert.assertTrue(fSys.delete(new Path("/dirX/dirY"),false));
Assert.assertFalse(fSys.exists(new Path("/dirX/dirY")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX/dirY")));
Assert.assertTrue(fSys.delete(new Path("/dirX"),false));
Assert.assertFalse(fSys.exists(new Path("/dirX")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"dirX")));
}
InternalCallVerifier EqualityVerifier
/** The chrooted filesystem's URI is the URI of the chroot base. */
@Test public void testURI(){
Assert.assertEquals(chrootedTo.toUri(),fSys.getUri());
}
InternalCallVerifier BooleanVerifier
/**
 * rename of a file and of a directory through the chrooted view must be
 * visible both via fSys and via the underlying fSysTarget.
 */
@Test public void testRename() throws IOException {
// file rename
fileSystemTestHelper.createFile(fSys,"/newDir/foo");
fSys.rename(new Path("/newDir/foo"),new Path("/newDir/fooBar"));
Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fSys.isFile(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/fooBar")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));
// directory rename
fSys.mkdirs(new Path("/newDir/dirFoo"));
fSys.rename(new Path("/newDir/dirFoo"),new Path("/newDir/dirFooBar"));
Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
Assert.assertTrue(fSys.isDirectory(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
}
InternalCallVerifier EqualityVerifier
/**
 * Basic path mapping of the chrooted fs: its URI is the chroot base,
 * the working and home directories are the (qualified) user home, and
 * makeQualified qualifies against the local FS URI.
 */
@Test public void testBasicPaths(){
URI uri=fSys.getUri();
Assert.assertEquals(chrootedTo.toUri(),uri);
Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))),fSys.getWorkingDirectory());
Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))),fSys.getHomeDirectory());
Assert.assertEquals(new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI,null),fSys.makeQualified(new Path("/foo/bar")));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * setWorkingDirectory semantics on the chrooted fs: "." keeps the
 * current directory, ".." moves to the parent, relative names resolve
 * against the current working dir, and absolute (including file://)
 * paths are taken as-is. Relative file access is checked by creating a
 * file absolutely and opening it relatively.
 */
@Test public void testWorkingDirectory() throws Exception {
fSys.mkdirs(new Path("/testWd"));
Path workDir=new Path("/testWd");
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// "." must leave the working directory unchanged
fSys.setWorkingDirectory(new Path("."));
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// ".." must move to the parent
fSys.setWorkingDirectory(new Path(".."));
Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory());
workDir=new Path("/testWd");
fSys.setWorkingDirectory(workDir);
Assert.assertEquals(workDir,fSys.getWorkingDirectory());
// a relative name resolves against the current working directory
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(relativeDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
absoluteDir=new Path("/test/existingDir2");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
// relative file access must resolve against the working directory
Path absoluteFooPath=new Path(absoluteDir,"foo");
fSys.create(absoluteFooPath).close();
fSys.open(new Path("foo")).close();
fSys.mkdirs(new Path("newDir"));
Assert.assertTrue(fSys.isDirectory(new Path(absoluteDir,"newDir")));
// a fully-qualified file:// path is also a legal working directory
final String LOCAL_FS_ROOT_URI="file:///tmp/test";
absoluteDir=new Path(LOCAL_FS_ROOT_URI + "/existingDir");
fSys.mkdirs(absoluteDir);
fSys.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
}
InternalCallVerifier BooleanVerifier
/**
* Test modify operations (create, mkdir, delete, etc)
* Verify the operation via chrootedfs (ie fSys) and *also* via the
* target file system (ie fSysTarget) that has been chrooted.
*/
@Test public void testCreateDelete() throws IOException {
fileSystemTestHelper.createFile(fSys,"/foo");
Assert.assertTrue(fSys.isFile(new Path("/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"foo")));
fileSystemTestHelper.createFile(fSys,"/newDir/foo");
Assert.assertTrue(fSys.isFile(new Path("/newDir/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fSys.delete(new Path("/newDir/foo"),false));
Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
fileSystemTestHelper.createFile(fSys,"/newDir/newDir2/foo");
Assert.assertTrue(fSys.isFile(new Path("/newDir/newDir2/foo")));
Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/newDir2/foo")));
Assert.assertTrue(fSys.delete(new Path("/newDir/newDir2/foo"),false));
Assert.assertFalse(fSys.exists(new Path("/newDir/newDir2/foo")));
Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/newDir2/foo")));
}
InternalCallVerifier EqualityVerifier
/**
 * A directory with no quotas set reports -1 for both the namespace
 * quota and the space quota in its ContentSummary.
 */
@Test public void testGetContentSummary() throws IOException {
final Path dir=new Path("/newDir/dirFoo");
fSys.mkdirs(dir);
final ContentSummary summary=fSys.getContentSummary(dir);
Assert.assertEquals(-1L,summary.getQuota());
Assert.assertEquals(-1L,summary.getSpaceQuota());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testList() throws IOException {
FileStatus fs=fSys.getFileStatus(new Path("/"));
Assert.assertTrue(fs.isDirectory());
Assert.assertEquals(fs.getPath(),chrootedTo);
FileStatus[] dirPaths=fSys.listStatus(new Path("/"));
Assert.assertEquals(0,dirPaths.length);
fileSystemTestHelper.createFile(fSys,"/foo");
fileSystemTestHelper.createFile(fSys,"/bar");
fSys.mkdirs(new Path("/dirX"));
fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirY"));
fSys.mkdirs(new Path("/dirX/dirXX"));
dirPaths=fSys.listStatus(new Path("/"));
Assert.assertEquals(4,dirPaths.length);
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"foo"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"bar"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isFile());
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirX"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirY"),dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue(fs.isDirectory());
}
InternalCallVerifier BooleanVerifier
@Test public void testRename() throws IOException {
  // Rename a file through the chrooted context; verify through both the
  // chrooted fc and the underlying target context.
  final Path srcFile=new Path("/newDir/foo");
  final Path dstFile=new Path("/newDir/fooBar");
  fileContextTestHelper.createFile(fc,"/newDir/foo");
  fc.rename(srcFile,dstFile);
  Assert.assertFalse(exists(fc,srcFile));
  Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/foo")));
  Assert.assertTrue(isFile(fc,fileContextTestHelper.getTestRootPath(fc,"/newDir/fooBar")));
  Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"newDir/fooBar")));
  // Same round trip for a directory.
  final Path srcDir=new Path("/newDir/dirFoo");
  final Path dstDir=new Path("/newDir/dirFooBar");
  fc.mkdir(srcDir,FileContext.DEFAULT_PERM,false);
  fc.rename(srcDir,dstDir);
  Assert.assertFalse(exists(fc,srcDir));
  Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/dirFoo")));
  Assert.assertTrue(isDir(fc,fileContextTestHelper.getTestRootPath(fc,"/newDir/dirFooBar")));
  Assert.assertTrue(isDir(fcTarget,new Path(chrootedTo,"newDir/dirFooBar")));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testList() throws IOException {
  // The chroot root presents itself as the target directory.
  FileStatus rootStatus=fc.getFileStatus(new Path("/"));
  Assert.assertTrue(rootStatus.isDirectory());
  Assert.assertEquals(rootStatus.getPath(),chrootedTo);
  FileStatus[] listing=fc.util().listStatus(new Path("/"));
  Assert.assertEquals(0,listing.length);
  // Two files and two dirs directly under "/"; nested dirXX must not
  // show up in a non-recursive listing.
  fileContextTestHelper.createFileNonRecursive(fc,"/foo");
  fileContextTestHelper.createFileNonRecursive(fc,"/bar");
  fc.mkdir(new Path("/dirX"),FileContext.DEFAULT_PERM,false);
  fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirY"),FileContext.DEFAULT_PERM,false);
  fc.mkdir(new Path("/dirX/dirXX"),FileContext.DEFAULT_PERM,false);
  listing=fc.util().listStatus(new Path("/"));
  Assert.assertEquals(4,listing.length);
  // Each expected entry is present with the correct type.
  for (String fileName : new String[]{"foo","bar"}) {
    FileStatus entry=fileContextTestHelper.containsPath(fcTarget,fileName,listing);
    Assert.assertNotNull(entry);
    Assert.assertTrue(entry.isFile());
  }
  for (String dirName : new String[]{"dirX","dirY"}) {
    FileStatus entry=fileContextTestHelper.containsPath(fcTarget,dirName,listing);
    Assert.assertNotNull(entry);
    Assert.assertTrue(entry.isDirectory());
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises working-directory semantics of the chrooted FileContext:
// cd to absolute and relative dirs, "." and "..", relative open/mkdir
// resolution, failure on a nonexistent dir, and finally cd to a fully
// qualified local-fs path.
@Test public void testWorkingDirectory() throws Exception {
// cd to an absolute dir inside the chroot; getWorkingDirectory must
// echo the fully-qualified form.
fc.mkdir(new Path("/testWd"),FileContext.DEFAULT_PERM,false);
Path workDir=new Path("/testWd");
Path fqWd=fc.makeQualified(workDir);
fc.setWorkingDirectory(workDir);
Assert.assertEquals(fqWd,fc.getWorkingDirectory());
// "." keeps the working dir unchanged; ".." moves to the parent.
fc.setWorkingDirectory(new Path("."));
Assert.assertEquals(fqWd,fc.getWorkingDirectory());
fc.setWorkingDirectory(new Path(".."));
Assert.assertEquals(fqWd.getParent(),fc.getWorkingDirectory());
// Return to /testWd before exercising relative-path cd.
workDir=new Path("/testWd");
fqWd=fc.makeQualified(workDir);
fc.setWorkingDirectory(workDir);
Assert.assertEquals(fqWd,fc.getWorkingDirectory());
// cd via a RELATIVE path resolves against the current working dir.
Path relativeDir=new Path("existingDir1");
Path absoluteDir=new Path(workDir,"existingDir1");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
Path fqAbsoluteDir=fc.makeQualified(absoluteDir);
fc.setWorkingDirectory(relativeDir);
Assert.assertEquals(fqAbsoluteDir,fc.getWorkingDirectory());
// cd via an ABSOLUTE path.
absoluteDir=new Path("/test/existingDir2");
fqAbsoluteDir=fc.makeQualified(absoluteDir);
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(fqAbsoluteDir,fc.getWorkingDirectory());
// Relative open/mkdir must resolve against the new working dir.
Path absolutePath=new Path(absoluteDir,"foo");
fc.create(absolutePath,EnumSet.of(CreateFlag.CREATE)).close();
fc.open(new Path("foo")).close();
fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
// cd into a dir that does not exist must fail.
absoluteDir=fileContextTestHelper.getTestRootPath(fc,"nonexistingPath");
try {
fc.setWorkingDirectory(absoluteDir);
Assert.fail("cd to non existing dir should have failed");
}
catch ( Exception e) {
// expected: any failure is acceptable here
}
// cd to a fully-qualified path on the underlying local fs also works.
final String LOCAL_FS_ROOT_URI="file:///tmp/test";
absoluteDir=new Path(LOCAL_FS_ROOT_URI + "/existingDir");
fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
fc.setWorkingDirectory(absoluteDir);
Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
}
InternalCallVerifier EqualityVerifier
@Test public void testBasicPaths(){
  // The chrooted context's default fs reports the chroot target URI.
  Assert.assertEquals(chrootedTo.toUri(),fc.getDefaultFileSystem().getUri());
  // Working dir and home dir both start at the user's home directory.
  final Path qualifiedHome=fc.makeQualified(new Path(System.getProperty("user.home")));
  Assert.assertEquals(qualifiedHome,fc.getWorkingDirectory());
  Assert.assertEquals(qualifiedHome,fc.getHomeDirectory());
  // makeQualified resolves absolute paths against the local fs root.
  Assert.assertEquals(new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI,null),fc.makeQualified(new Path("/foo/bar")));
}
InternalCallVerifier EqualityVerifier
@Test public void testResolvePath() throws IOException {
  // "/" inside the chroot resolves to the chroot target itself, and a
  // file created through the chroot resolves beneath the target dir.
  final AbstractFileSystem chrootedFs=fc.getDefaultFileSystem();
  Assert.assertEquals(chrootedTo,chrootedFs.resolvePath(new Path("/")));
  fileContextTestHelper.createFile(fc,"/foo");
  Assert.assertEquals(new Path(chrootedTo,"foo"),chrootedFs.resolvePath(new Path("/foo")));
}
InternalCallVerifier BooleanVerifier
@Test public void testIsValidNameValidInBaseFs() throws Exception {
  // When the base fs accepts a name, the chrooted fs must accept it too,
  // having consulted the base fs with the chroot-prefixed path.
  final AbstractFileSystem spiedBase=Mockito.spy(fc.getDefaultFileSystem());
  final ChRootedFs chrooted=new ChRootedFs(spiedBase,new Path("/chroot"));
  Mockito.doReturn(true).when(spiedBase).isValidName(Mockito.anyString());
  Assert.assertTrue(chrooted.isValidName("/test"));
  Mockito.verify(spiedBase).isValidName("/chroot/test");
}
InternalCallVerifier BooleanVerifier
@Test public void testMkdirDelete() throws IOException {
  // Create nested dirs through the chroot; every effect is checked via
  // both the chrooted fc and the target context.
  final Path outerDir=new Path("/dirX");
  final Path innerDir=new Path("/dirX/dirY");
  fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirX"),FileContext.DEFAULT_PERM,false);
  Assert.assertTrue(isDir(fc,outerDir));
  Assert.assertTrue(isDir(fcTarget,new Path(chrootedTo,"dirX")));
  fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirX/dirY"),FileContext.DEFAULT_PERM,false);
  Assert.assertTrue(isDir(fc,innerDir));
  Assert.assertTrue(isDir(fcTarget,new Path(chrootedTo,"dirX/dirY")));
  // Delete innermost first, then its parent.
  Assert.assertTrue(fc.delete(innerDir,false));
  Assert.assertFalse(exists(fc,innerDir));
  Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"dirX/dirY")));
  Assert.assertTrue(fc.delete(outerDir,false));
  Assert.assertFalse(exists(fc,outerDir));
  Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"dirX")));
}
InternalCallVerifier BooleanVerifier
/**
* Test modify operations (create, mkdir, delete, etc)
* Verify the operation via chrootedfs (ie fc) and *also* via the
* target file system (ie fclocal) that has been chrooted.
*/
@Test public void testCreateDelete() throws IOException {
fileContextTestHelper.createFileNonRecursive(fc,"/foo");
Assert.assertTrue(isFile(fc,new Path("/foo")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"foo")));
fileContextTestHelper.createFile(fc,"/newDir/foo");
Assert.assertTrue(isFile(fc,new Path("/newDir/foo")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"newDir/foo")));
Assert.assertTrue(fc.delete(new Path("/newDir/foo"),false));
Assert.assertFalse(exists(fc,new Path("/newDir/foo")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/foo")));
fileContextTestHelper.createFile(fc,"/newDir/newDir2/foo");
Assert.assertTrue(isFile(fc,new Path("/newDir/newDir2/foo")));
Assert.assertTrue(isFile(fcTarget,new Path(chrootedTo,"newDir/newDir2/foo")));
Assert.assertTrue(fc.delete(new Path("/newDir/newDir2/foo"),false));
Assert.assertFalse(exists(fc,new Path("/newDir/newDir2/foo")));
Assert.assertFalse(exists(fcTarget,new Path(chrootedTo,"newDir/newDir2/foo")));
}
InternalCallVerifier BooleanVerifier
@Test public void testIsValidNameInvalidInBaseFs() throws Exception {
  // When the base fs rejects a name, the chrooted fs must reject it too,
  // having consulted the base fs with the chroot-prefixed path.
  final AbstractFileSystem spiedBase=Mockito.spy(fc.getDefaultFileSystem());
  final ChRootedFs chrooted=new ChRootedFs(spiedBase,new Path("/chroot"));
  Mockito.doReturn(false).when(spiedBase).isValidName(Mockito.anyString());
  Assert.assertFalse(chrooted.isValidName("/test"));
  Mockito.verify(spiedBase).isValidName("/chroot/test");
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
* Regression test for HADOOP-8408.
*/
@Test public void testGetCanonicalServiceNameWithNonDefaultMountTable() throws URISyntaxException, IOException {
Configuration conf=new Configuration();
ConfigUtil.addLink(conf,MOUNT_TABLE_NAME,"/user",new URI("file:///"));
FileSystem viewFs=FileSystem.get(new URI(FsConstants.VIEWFS_SCHEME + "://" + MOUNT_TABLE_NAME),conf);
String serviceName=viewFs.getCanonicalServiceName();
assertNull(serviceName);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGetChildFileSystems() throws Exception {
  // Leaf file systems have no children of their own; the view fs
  // reports exactly its two mounted targets.
  assertNull(fs1.getChildFileSystems());
  assertNull(fs2.getChildFileSystems());
  // Fixed: use a parameterized List<FileSystem> instead of a raw List.
  List<FileSystem> children=Arrays.asList(viewFs.getChildFileSystems());
  assertEquals(2,children.size());
  assertTrue(children.contains(fs1));
  assertTrue(children.contains(fs2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testAddDelegationTokens() throws Exception {
Credentials creds=new Credentials();
Token> fs1Tokens[]=addTokensWithCreds(fs1,creds);
assertEquals(1,fs1Tokens.length);
assertEquals(1,creds.numberOfTokens());
Token> fs2Tokens[]=addTokensWithCreds(fs2,creds);
assertEquals(1,fs2Tokens.length);
assertEquals(2,creds.numberOfTokens());
Credentials savedCreds=creds;
creds=new Credentials();
Token> viewFsTokens[]=viewFs.addDelegationTokens("me",creds);
assertEquals(2,viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(),creds.numberOfTokens());
viewFsTokens=viewFs.addDelegationTokens("me",creds);
assertEquals(0,viewFsTokens.length);
assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
assertEquals(savedCreds.numberOfTokens(),creds.numberOfTokens());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
@Test public void testGetCanonicalServiceNameWithDefaultMountTable() throws URISyntaxException, IOException {
  // A viewfs over the default mount table must have no canonical
  // service name.
  final Configuration conf=new Configuration();
  ConfigUtil.addLink(conf,"/user",new URI("file:///"));
  final FileSystem viewFs=FileSystem.get(FsConstants.VIEWFS_URI,conf);
  assertNull(viewFs.getCanonicalServiceName());
}
InternalCallVerifier EqualityVerifier
/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test public void testAclOnMountEntry() throws Exception {
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE));
fsView.setAcl(mountOnNn1,aclSpec);
AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected,aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",READ));
fsView.modifyAclEntries(mountOnNn1,aclSpec);
expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(DEFAULT,USER,READ_WRITE),aclEntry(DEFAULT,USER,"foo",READ),aclEntry(DEFAULT,GROUP,READ),aclEntry(DEFAULT,MASK,READ),aclEntry(DEFAULT,OTHER,NONE)};
assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn1)));
fsView.removeDefaultAcl(mountOnNn1);
expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected,aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
assertEquals(0,fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0,fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
fsView.removeAcl(mountOnNn1);
assertEquals(0,fsView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0,fHdfs.getAclStatus(targetTestRoot).getEntries().size());
aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ));
fsView.modifyAclEntries(mountOnNn2,aclSpec);
expected=new AclEntry[]{aclEntry(ACCESS,USER,"bar",READ),aclEntry(ACCESS,GROUP,READ_EXECUTE)};
assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected,aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
fsView.removeAclEntries(mountOnNn2,Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ)));
expected=new AclEntry[]{aclEntry(ACCESS,GROUP,READ_EXECUTE)};
assertArrayEquals(expected,aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
fsView.removeAcl(mountOnNn2);
assertEquals(0,fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0,fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
}
InternalCallVerifier EqualityVerifier
/**
* Verify a ViewFileSystem wrapped over multiple federated NameNodes will
* dispatch the XAttr operations to the correct NameNode.
*/
@Test public void testXAttrOnMountEntry() throws Exception {
fsView.setXAttr(mountOnNn1,name1,value1);
fsView.setXAttr(mountOnNn1,name2,value2);
assertEquals(2,fsView.getXAttrs(mountOnNn1).size());
assertArrayEquals(value1,fsView.getXAttr(mountOnNn1,name1));
assertArrayEquals(value2,fsView.getXAttr(mountOnNn1,name2));
assertArrayEquals(value1,fHdfs.getXAttr(targetTestRoot,name1));
assertArrayEquals(value2,fHdfs.getXAttr(targetTestRoot,name2));
assertEquals(0,fsView.getXAttrs(mountOnNn2).size());
assertEquals(0,fHdfs2.getXAttrs(targetTestRoot2).size());
fsView.removeXAttr(mountOnNn1,name1);
fsView.removeXAttr(mountOnNn1,name2);
assertEquals(0,fsView.getXAttrs(mountOnNn1).size());
assertEquals(0,fHdfs.getXAttrs(targetTestRoot).size());
fsView.setXAttr(mountOnNn2,name1,value1);
fsView.setXAttr(mountOnNn2,name2,value2);
assertEquals(2,fsView.getXAttrs(mountOnNn2).size());
assertArrayEquals(value1,fsView.getXAttr(mountOnNn2,name1));
assertArrayEquals(value2,fsView.getXAttr(mountOnNn2,name2));
assertArrayEquals(value1,fHdfs2.getXAttr(targetTestRoot2,name1));
assertArrayEquals(value2,fHdfs2.getXAttr(targetTestRoot2,name2));
fsView.removeXAttr(mountOnNn2,name1);
fsView.removeXAttr(mountOnNn2,name2);
assertEquals(0,fsView.getXAttrs(mountOnNn2).size());
assertEquals(0,fHdfs2.getXAttrs(targetTestRoot2).size());
}
InternalCallVerifier EqualityVerifier
/**
* Test that getContentSummary can be retrieved on the client side.
*/
@Test public void testGetContentSummary() throws IOException {
FileSystem hFs=cluster.getFileSystem(0);
final DistributedFileSystem dfs=(DistributedFileSystem)hFs;
dfs.setQuota(testFileDirPath,100,500);
ContentSummary cs=vfs.getContentSummary(testFileDirPath);
assertEquals(100,cs.getQuota());
assertEquals(500,cs.getSpaceQuota());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test that default blocksize values can be retrieved on the client side.
*/
@Test public void testGetDefaultBlockSize() throws IOException, URISyntaxException {
try {
vfs.getDefaultBlockSize();
fail("getServerDefaults on viewFs did not throw excetion!");
}
catch ( NotInMountpointException e) {
assertEquals(vfs.getDefaultBlockSize(testFilePath),DFS_BLOCK_SIZE_DEFAULT);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test that server default values can be retrieved on the client side.
*/
@Test public void testServerDefaults() throws IOException {
try {
FsServerDefaults serverDefaults=vfs.getServerDefaults();
fail("getServerDefaults on viewFs did not throw excetion!");
}
catch ( NotInMountpointException e) {
FsServerDefaults serverDefaults=vfs.getServerDefaults(testFilePath);
assertEquals(DFS_BLOCK_SIZE_DEFAULT,serverDefaults.getBlockSize());
assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT,serverDefaults.getBytesPerChecksum());
assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT,serverDefaults.getWritePacketSize());
assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT,serverDefaults.getFileBufferSize());
assertEquals(DFS_REPLICATION_DEFAULT + 1,serverDefaults.getReplication());
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test that default replication values can be retrieved on the client side.
*/
@Test public void testGetDefaultReplication() throws IOException, URISyntaxException {
try {
vfs.getDefaultReplication();
fail("getDefaultReplication on viewFs did not throw excetion!");
}
catch ( NotInMountpointException e) {
assertEquals(vfs.getDefaultReplication(testFilePath),DFS_REPLICATION_DEFAULT + 1);
}
}
InternalCallVerifier EqualityVerifier
@Test public void testFileStatusSerialziation() throws IOException, URISyntaxException {
  // Round-trip a FileStatus obtained through the view fs through
  // Writable serialization and check the length survives.
  final long expectedLen=fileSystemTestHelper.createFile(fHdfs,testfilename);
  final FileStatus stat=vfs.getFileStatus(new Path(testfilename));
  assertEquals(expectedLen,stat.getLen());
  final DataOutputBuffer outBuf=new DataOutputBuffer();
  stat.write(outBuf);
  final DataInputBuffer inBuf=new DataInputBuffer();
  inBuf.reset(outBuf.getData(),0,outBuf.getLength());
  final FileStatus roundTripped=new FileStatus();
  roundTripped.readFields(inBuf);
  assertEquals(expectedLen,roundTripped.getLen());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetFileChecksum() throws IOException, URISyntaxException {
  // Create the target file plus an unrelated sibling; the view-fs
  // checksum must equal the direct HDFS checksum and differ from the
  // sibling's checksum.
  fileSystemTestHelper.createFile(fHdfs,someFile);
  fileSystemTestHelper.createFile(fHdfs,fileSystemTestHelper.getTestRootPath(fHdfs,someFile + "other"),1,512);
  final FileChecksum viaViewFs=vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
  final FileChecksum viaHdfs=fHdfs.getFileChecksum(new Path(someFile));
  final FileChecksum siblingChecksum=fHdfs.getFileChecksum(new Path(someFile + "other"));
  assertEquals("HDFS and ViewFS checksums were not the same",viaViewFs,viaHdfs);
  assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!",viaViewFs.equals(siblingChecksum));
}
InternalCallVerifier EqualityVerifier
/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the ACL operations to the correct NameNode.
*/
@Test public void testAclOnMountEntry() throws Exception {
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE));
fcView.setAcl(mountOnNn1,aclSpec);
AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected,aclEntryArray(fc.getAclStatus(targetTestRoot)));
aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",READ));
fcView.modifyAclEntries(mountOnNn1,aclSpec);
expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(DEFAULT,USER,READ_WRITE),aclEntry(DEFAULT,USER,"foo",READ),aclEntry(DEFAULT,GROUP,READ),aclEntry(DEFAULT,MASK,READ),aclEntry(DEFAULT,OTHER,NONE)};
assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn1)));
fcView.removeDefaultAcl(mountOnNn1);
expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected,aclEntryArray(fc.getAclStatus(targetTestRoot)));
assertEquals(0,fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0,fc2.getAclStatus(targetTestRoot2).getEntries().size());
fcView.removeAcl(mountOnNn1);
assertEquals(0,fcView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0,fc.getAclStatus(targetTestRoot).getEntries().size());
aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ));
fcView.modifyAclEntries(mountOnNn2,aclSpec);
expected=new AclEntry[]{aclEntry(ACCESS,USER,"bar",READ),aclEntry(ACCESS,GROUP,READ_EXECUTE)};
assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected,aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
fcView.removeAclEntries(mountOnNn2,Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ)));
expected=new AclEntry[]{aclEntry(ACCESS,GROUP,READ_EXECUTE)};
assertArrayEquals(expected,aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
fcView.removeAcl(mountOnNn2);
assertEquals(0,fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0,fc2.getAclStatus(targetTestRoot2).getEntries().size());
}
InternalCallVerifier EqualityVerifier
/**
* Verify a ViewFs wrapped over multiple federated NameNodes will
* dispatch the XAttr operations to the correct NameNode.
*/
@Test public void testXAttrOnMountEntry() throws Exception {
fcView.setXAttr(mountOnNn1,name1,value1);
fcView.setXAttr(mountOnNn1,name2,value2);
assertEquals(2,fcView.getXAttrs(mountOnNn1).size());
assertArrayEquals(value1,fcView.getXAttr(mountOnNn1,name1));
assertArrayEquals(value2,fcView.getXAttr(mountOnNn1,name2));
assertArrayEquals(value1,fc.getXAttr(targetTestRoot,name1));
assertArrayEquals(value2,fc.getXAttr(targetTestRoot,name2));
assertEquals(0,fcView.getXAttrs(mountOnNn2).size());
assertEquals(0,fc2.getXAttrs(targetTestRoot2).size());
fcView.removeXAttr(mountOnNn1,name1);
fcView.removeXAttr(mountOnNn1,name2);
assertEquals(0,fcView.getXAttrs(mountOnNn1).size());
assertEquals(0,fc.getXAttrs(targetTestRoot).size());
fcView.setXAttr(mountOnNn2,name1,value1);
fcView.setXAttr(mountOnNn2,name2,value2);
assertEquals(2,fcView.getXAttrs(mountOnNn2).size());
assertArrayEquals(value1,fcView.getXAttr(mountOnNn2,name1));
assertArrayEquals(value2,fcView.getXAttr(mountOnNn2,name2));
assertArrayEquals(value1,fc2.getXAttr(targetTestRoot2,name1));
assertArrayEquals(value2,fc2.getXAttr(targetTestRoot2,name2));
fcView.removeXAttr(mountOnNn2,name1);
fcView.removeXAttr(mountOnNn2,name2);
assertEquals(0,fcView.getXAttrs(mountOnNn2).size());
assertEquals(0,fc2.getXAttrs(targetTestRoot2).size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testFileStatusSerialziation() throws IOException, URISyntaxException {
  // Write a small local file, mount its directory into a viewfs, and
  // check that the FileStatus seen through the viewfs survives Writable
  // serialization with its length intact.
  String testfilename="testFileStatusSerialziation";
  TEST_DIR.mkdirs();
  File infile=new File(TEST_DIR,testfilename);
  final byte[] content="dingos".getBytes();
  // Improved: try-with-resources replaces the manual try/finally close.
  try (FileOutputStream fos=new FileOutputStream(infile)) {
    fos.write(content);
  }
  assertEquals((long)content.length,infile.length());
  // Mount TEST_DIR at /foo/bar/baz in a default-mount-table viewfs.
  Configuration conf=new Configuration();
  ConfigUtil.addLink(conf,"/foo/bar/baz",TEST_DIR.toURI());
  FileSystem vfs=FileSystem.get(FsConstants.VIEWFS_URI,conf);
  assertEquals(ViewFileSystem.class,vfs.getClass());
  FileStatus stat=vfs.getFileStatus(new Path("/foo/bar/baz",testfilename));
  assertEquals(content.length,stat.getLen());
  // Serialize and deserialize the status; length must round-trip.
  DataOutputBuffer dob=new DataOutputBuffer();
  stat.write(dob);
  DataInputBuffer dib=new DataInputBuffer();
  dib.reset(dob.getData(),0,dob.getLength());
  FileStatus deSer=new FileStatus();
  deSer.readFields(dib);
  assertEquals(content.length,deSer.getLen());
}
InternalCallVerifier BooleanVerifier
@Test public void testCreateNonRecursive() throws IOException {
Path path=fileSystemTestHelper.getTestRootPath(fsView,"/user/foo");
fsView.createNonRecursive(path,false,1024,(short)1,1024L,null);
FileStatus status=fsView.getFileStatus(new Path("/user/foo"));
Assert.assertTrue("Created file should be type file",fsView.isFile(new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",fsTarget.isFile(new Path(targetTestRoot,"user/foo")));
}
InternalCallVerifier BooleanVerifier
/**
* Test modify operations (create, mkdir, delete, etc)
* on the mount file system where the pathname references through
* the mount points. Hence these operation will modify the target
* file system.
* Verify the operation via mountfs (ie fSys) and *also* via the
* target file system (ie fSysLocal) that the mount link points-to.
*/
@Test public void testOperationsThroughMountLinks() throws IOException {
fileSystemTestHelper.createFile(fsView,"/user/foo");
Assert.assertTrue("Created file should be type file",fsView.isFile(new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",fsTarget.isFile(new Path(targetTestRoot,"user/foo")));
Assert.assertTrue("Delete should suceed",fsView.delete(new Path("/user/foo"),false));
Assert.assertFalse("File should not exist after delete",fsView.exists(new Path("/user/foo")));
Assert.assertFalse("Target File should not exist after delete",fsTarget.exists(new Path(targetTestRoot,"user/foo")));
fileSystemTestHelper.createFile(fsView,"/internalDir/linkToDir2/foo");
Assert.assertTrue("Created file should be type file",fsView.isFile(new Path("/internalDir/linkToDir2/foo")));
Assert.assertTrue("Target of created file should be type file",fsTarget.isFile(new Path(targetTestRoot,"dir2/foo")));
Assert.assertTrue("Delete should suceed",fsView.delete(new Path("/internalDir/linkToDir2/foo"),false));
Assert.assertFalse("File should not exist after delete",fsView.exists(new Path("/internalDir/linkToDir2/foo")));
Assert.assertFalse("Target File should not exist after delete",fsTarget.exists(new Path(targetTestRoot,"dir2/foo")));
fileSystemTestHelper.createFile(fsView,"/internalDir/internalDir2/linkToDir3/foo");
Assert.assertTrue("Created file should be type file",fsView.isFile(new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertTrue("Target of created file should be type file",fsTarget.isFile(new Path(targetTestRoot,"dir3/foo")));
fileSystemTestHelper.createFile(fsView,"/internalDir/linkToDir2/missingDir/miss2/foo");
Assert.assertTrue("Created file should be type file",fsView.isFile(new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
Assert.assertTrue("Target of created file should be type file",fsTarget.isFile(new Path(targetTestRoot,"dir2/missingDir/miss2/foo")));
Assert.assertTrue("Delete should succeed",fsView.delete(new Path("/internalDir/internalDir2/linkToDir3/foo"),false));
Assert.assertFalse("File should not exist after delete",fsView.exists(new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertFalse("Target File should not exist after delete",fsTarget.exists(new Path(targetTestRoot,"dir3/foo")));
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,"/user/dirX"));
Assert.assertTrue("New dir should be type dir",fsView.isDirectory(new Path("/user/dirX")));
Assert.assertTrue("Target of new dir should be of type dir",fsTarget.isDirectory(new Path(targetTestRoot,"user/dirX")));
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,"/user/dirX/dirY"));
Assert.assertTrue("New dir should be type dir",fsView.isDirectory(new Path("/user/dirX/dirY")));
Assert.assertTrue("Target of new dir should be of type dir",fsTarget.isDirectory(new Path(targetTestRoot,"user/dirX/dirY")));
Assert.assertTrue("Delete should succeed",fsView.delete(new Path("/user/dirX/dirY"),false));
Assert.assertFalse("File should not exist after delete",fsView.exists(new Path("/user/dirX/dirY")));
Assert.assertFalse("Target File should not exist after delete",fsTarget.exists(new Path(targetTestRoot,"user/dirX/dirY")));
Assert.assertTrue("Delete should succeed",fsView.delete(new Path("/user/dirX"),false));
Assert.assertFalse("File should not exist after delete",fsView.exists(new Path("/user/dirX")));
Assert.assertFalse(fsTarget.exists(new Path(targetTestRoot,"user/dirX")));
fileSystemTestHelper.createFile(fsView,"/user/foo");
fsView.rename(new Path("/user/foo"),new Path("/user/fooBar"));
Assert.assertFalse("Renamed src should not exist",fsView.exists(new Path("/user/foo")));
Assert.assertFalse("Renamed src should not exist in target",fsTarget.exists(new Path(targetTestRoot,"user/foo")));
Assert.assertTrue("Renamed dest should exist as file",fsView.isFile(fileSystemTestHelper.getTestRootPath(fsView,"/user/fooBar")));
Assert.assertTrue("Renamed dest should exist as file in target",fsTarget.isFile(new Path(targetTestRoot,"user/fooBar")));
fsView.mkdirs(new Path("/user/dirFoo"));
fsView.rename(new Path("/user/dirFoo"),new Path("/user/dirFooBar"));
Assert.assertFalse("Renamed src should not exist",fsView.exists(new Path("/user/dirFoo")));
Assert.assertFalse("Renamed src should not exist in target",fsTarget.exists(new Path(targetTestRoot,"user/dirFoo")));
Assert.assertTrue("Renamed dest should exist as dir",fsView.isDirectory(fileSystemTestHelper.getTestRootPath(fsView,"/user/dirFooBar")));
Assert.assertTrue("Renamed dest should exist as dir in target",fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
fsView.mkdirs(new Path("/targetRoot/dirFoo"));
Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
boolean dirFooPresent=false;
for ( FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent=true;
}
}
Assert.assertTrue(dirFooPresent);
}
InternalCallVerifier BooleanVerifier
@Test public void testRootReadableExecutable() throws IOException {
Assert.assertFalse("In root before cd",fsView.getWorkingDirectory().isRoot());
fsView.setWorkingDirectory(new Path("/"));
Assert.assertTrue("Not in root dir after cd",fsView.getWorkingDirectory().isRoot());
verifyRootChildren(fsView.listStatus(fsView.getWorkingDirectory()));
final FileStatus rootStatus=fsView.getFileStatus(fsView.getWorkingDirectory());
final FsPermission perms=rootStatus.getPermission();
Assert.assertTrue("User-executable permission not set!",perms.getUserAction().implies(FsAction.EXECUTE));
Assert.assertTrue("User-readable permission not set!",perms.getUserAction().implies(FsAction.READ));
Assert.assertTrue("Group-executable permission not set!",perms.getGroupAction().implies(FsAction.EXECUTE));
Assert.assertTrue("Group-readable permission not set!",perms.getGroupAction().implies(FsAction.READ));
Assert.assertTrue("Other-executable permission not set!",perms.getOtherAction().implies(FsAction.EXECUTE));
Assert.assertTrue("Other-readable permission not set!",perms.getOtherAction().implies(FsAction.READ));
}
InternalCallVerifier EqualityVerifier
@Test public void testResolvePathThroughMountPoints() throws IOException {
  // Paths created under the /user mount must resolve into the target root.
  fileSystemTestHelper.createFile(fsView,"/user/foo");
  Assert.assertEquals(new Path(targetTestRoot,"user/foo"),fsView.resolvePath(new Path("/user/foo")));
  // Same for nested directories.
  for (String dir : new String[]{"/user/dirX","/user/dirX/dirY"}) {
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,dir));
    Assert.assertEquals(new Path(targetTestRoot,dir.substring(1)),fsView.resolvePath(new Path(dir)));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGetBlockLocations() throws IOException {
Path targetFilePath=new Path(targetTestRoot,"data/largeFile");
FileSystemTestHelper.createFile(fsTarget,targetFilePath,10,1024);
Path viewFilePath=new Path("/data/largeFile");
Assert.assertTrue("Created File should be type File",fsView.isFile(viewFilePath));
BlockLocation[] viewBL=fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath),0,10240 + 100);
Assert.assertEquals(SupportsBlocks ? 10 : 1,viewBL.length);
BlockLocation[] targetBL=fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath),0,10240 + 100);
compareBLs(viewBL,targetBL);
fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath),0,10240 + 100);
targetBL=fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath),0,10240 + 100);
compareBLs(viewBL,targetBL);
}
InternalCallVerifier EqualityVerifier
@Test public void testResolvePathMountPoints() throws IOException {
  // Each mount point resolves to its configured target directory.
  final String[][] mountToTarget={{"/user","user"},{"/data","data"},{"/internalDir/linkToDir2","dir2"},{"/internalDir/internalDir2/linkToDir3","dir3"}};
  for (String[] pair : mountToTarget) {
    Assert.assertEquals(new Path(targetTestRoot,pair[1]),fsView.resolvePath(new Path(pair[0])));
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testResolvePathInternalPaths() throws IOException {
  // Internal (non-mount) directories resolve to themselves.
  for (String internalPath : new String[]{"/","/internalDir"}) {
    Assert.assertEquals(new Path(internalPath),fsView.resolvePath(new Path(internalPath)));
  }
}
InternalCallVerifier EqualityVerifier
/** The view file system should expose exactly the configured mount points. */
@Test public void testGetMountPoints(){
  final ViewFileSystem viewFs = (ViewFileSystem) fsView;
  final MountPoint[] points = viewFs.getMountPoints();
  Assert.assertEquals(getExpectedMountPoints(), points.length);
}
InternalCallVerifier EqualityVerifier
/**
 * Sanity checks: the fs URI is the viewfs URI, working/home directory are
 * the current user's home, and qualification is rooted at the viewfs URI.
 */
@Test public void testBasicPaths(){
  // The same home path is compared against both working and home directory.
  final Path homePath = new Path("/user/" + System.getProperty("user.name"));
  Assert.assertEquals(FsConstants.VIEWFS_URI, fsView.getUri());
  Assert.assertEquals(fsView.makeQualified(homePath), fsView.getWorkingDirectory());
  Assert.assertEquals(fsView.makeQualified(homePath), fsView.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(FsConstants.VIEWFS_URI, null),
      fsView.makeQualified(new Path("/foo/bar")));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The ACL status of an internal (mount-table) directory should report the
 * current user as owner, the user's primary group, the minimal 555 ACL,
 * and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser=UserGroupInformation.getCurrentUser();
  AclStatus aclStatus=fsView.getAclStatus(new Path("/internalDir"));
  // JUnit's contract is assertEquals(expected, actual); the original had the
  // arguments reversed, which yields misleading failure messages.
  assertEquals(currentUser.getUserName(),aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0],aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555),aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Listing a mount-target directory through the view fs should reflect
 * files and directories created through the view fs: empty at first, then
 * one file, then a file plus a directory with correct types and length.
 */
@Test public void testListOnMountTargetDirs() throws IOException {
FileStatus[] dirPaths=fsView.listStatus(new Path("/data"));
FileStatus fs;
Assert.assertEquals(0,dirPaths.length);
long len=fileSystemTestHelper.createFile(fsView,"/data/foo");
dirPaths=fsView.listStatus(new Path("/data"));
Assert.assertEquals(1,dirPaths.length);
fs=fileSystemTestHelper.containsPath(fsView,"/data/foo",dirPaths);
Assert.assertNotNull(fs);
// Typo "shoudl" fixed in the assertion messages below.
Assert.assertTrue("Created file should appear as a file",fs.isFile());
Assert.assertEquals(len,fs.getLen());
fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView,"/data/dirX"));
dirPaths=fsView.listStatus(new Path("/data"));
Assert.assertEquals(2,dirPaths.length);
fs=fileSystemTestHelper.containsPath(fsView,"/data/foo",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created file should appear as a file",fs.isFile());
fs=fileSystemTestHelper.containsPath(fsView,"/data/dirX",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("Created dir should appear as a dir",fs.isDirectory());
}
InternalCallVerifier EqualityVerifier
/**
 * When half of the previously fetched delegation tokens are already present
 * in the supplied Credentials, a second fetch should only add the missing
 * (expectedTokenCount + 1) / 2 tokens.
 */
@Test public void testGetDelegationTokensWithCredentials() throws IOException {
  Credentials credentials=new Credentials();
  // Fixed mangled generics: the original "List>" / "Token>" forms do not compile.
  List<Token<?>> delTokens=Arrays.asList(fsView.addDelegationTokens("sanjay",credentials));
  int expectedTokenCount=getExpectedDelegationTokenCountWithCredentials();
  Assert.assertEquals(expectedTokenCount,delTokens.size());
  Credentials newCredentials=new Credentials();
  // Pre-populate the new credentials with the first half of the tokens.
  for (int i=0; i < expectedTokenCount / 2; i++) {
    Token<?> token=delTokens.get(i);
    newCredentials.addToken(token.getService(),token);
  }
  List<Token<?>> delTokens2=Arrays.asList(fsView.addDelegationTokens("sanjay",newCredentials));
  Assert.assertEquals((expectedTokenCount + 1) / 2,delTokens2.size());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
FileStatus[] dirPaths=fsView.listStatus(new Path("/"));
FileStatus fs;
verifyRootChildren(dirPaths);
dirPaths=fsView.listStatus(new Path("/internalDir"));
Assert.assertEquals(2,dirPaths.length);
fs=fileSystemTestHelper.containsPath(fsView,"/internalDir/internalDir2",dirPaths);
Assert.assertNotNull(fs);
// Message fixed: this entry is an internal dir (checked with isDirectory()),
// not a mount link, so the old "A mount should appear as symlink" text was wrong.
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
fs=fileSystemTestHelper.containsPath(fsView,"/internalDir/linkToDir2",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
}
InternalCallVerifier EqualityVerifier
/**
* This default implementation is when viewfs has mount points
* into file systems, such as LocalFs that do no have delegation tokens.
* It should be overridden for when mount points into hdfs.
*/
@Test public void testGetDelegationTokens() throws IOException {
Token>[] delTokens=fsView.addDelegationTokens("sanjay",new Credentials());
Assert.assertEquals(getExpectedDelegationTokenCount(),delTokens.length);
}
InternalCallVerifier EqualityVerifier
/**
 * This default implementation is when viewfs has mount points
 * into file systems, such as LocalFs that do no have delegation tokens.
 * It should be overridden for when mount points into hdfs.
 */
@Test public void testGetDelegationTokens() throws IOException {
  // Fixed mangled generics: the original "List>" form does not compile.
  List<Token<?>> delTokens=fcView.getDelegationTokens(new Path("/"),"sanjay");
  Assert.assertEquals(getExpectedDelegationTokenCount(),delTokens.size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The ACL status of an internal (mount-table) directory viewed through
 * FileContext should report the current user as owner, the user's primary
 * group, the minimal 555 ACL, and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser=UserGroupInformation.getCurrentUser();
  AclStatus aclStatus=fcView.getAclStatus(new Path("/internalDir"));
  // JUnit's contract is assertEquals(expected, actual); the original had the
  // arguments reversed, which yields misleading failure messages.
  assertEquals(currentUser.getUserName(),aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0],aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555),aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}
InternalCallVerifier EqualityVerifier
/**
 * Resolving a path at a mount point through FileContext should return the
 * corresponding path in the target file system, including nested mount links.
 */
@Test public void testResolvePathMountPoints() throws IOException {
Assert.assertEquals(new Path(targetTestRoot,"user"),fcView.resolvePath(new Path("/user")));
Assert.assertEquals(new Path(targetTestRoot,"data"),fcView.resolvePath(new Path("/data")));
Assert.assertEquals(new Path(targetTestRoot,"dir2"),fcView.resolvePath(new Path("/internalDir/linkToDir2")));
Assert.assertEquals(new Path(targetTestRoot,"dir3"),fcView.resolvePath(new Path("/internalDir/internalDir2/linkToDir3")));
}
InternalCallVerifier EqualityVerifier
/**
 * getLinkTarget on each mount link should return the link's target path
 * in the underlying file system.
 */
@Test public void testSymlinkTarget() throws IOException {
  // JUnit's contract is assertEquals(expected, actual); the original passed the
  // actual value first, which yields misleading failure messages.
  Assert.assertEquals(new Path(targetTestRoot,"user"),fcView.getLinkTarget(new Path("/user")));
  Assert.assertEquals(new Path(targetTestRoot,"data"),fcView.getLinkTarget(new Path("/data")));
  Assert.assertEquals(new Path(targetTestRoot,"dir2"),fcView.getLinkTarget(new Path("/internalDir/linkToDir2")));
  Assert.assertEquals(new Path(targetTestRoot,"dir3"),fcView.getLinkTarget(new Path("/internalDir/internalDir2/linkToDir3")));
  Assert.assertEquals(new Path(targetTestRoot,"aFile"),fcView.getLinkTarget(new Path("/linkToAFile")));
}
InternalCallVerifier EqualityVerifier
/** The ViewFs instance backing fcView should expose all 8 configured mount points. */
@Test public void testGetMountPoints(){
  final ViewFs viewFs = (ViewFs) fcView.getDefaultFileSystem();
  final MountPoint[] points = viewFs.getMountPoints();
  Assert.assertEquals(8, points.length);
}
InternalCallVerifier EqualityVerifier
/**
 * Paths created through the view (a file and nested directories under a
 * mount point) should resolve to the corresponding target-fs paths.
 */
@Test public void testResolvePathThroughMountPoints() throws IOException {
fileContextTestHelper.createFile(fcView,"/user/foo");
Assert.assertEquals(new Path(targetTestRoot,"user/foo"),fcView.resolvePath(new Path("/user/foo")));
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,"/user/dirX"),FileContext.DEFAULT_PERM,false);
Assert.assertEquals(new Path(targetTestRoot,"user/dirX"),fcView.resolvePath(new Path("/user/dirX")));
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,"/user/dirX/dirY"),FileContext.DEFAULT_PERM,false);
Assert.assertEquals(new Path(targetTestRoot,"user/dirX/dirY"),fcView.resolvePath(new Path("/user/dirX/dirY")));
}
InternalCallVerifier EqualityVerifier
/**
 * Paths that stay inside the mount table's internal directory tree
 * (no mount link crossed) should resolve to themselves.
 */
@Test public void testResolvePathInternalPaths() throws IOException {
Assert.assertEquals(new Path("/"),fcView.resolvePath(new Path("/")));
Assert.assertEquals(new Path("/internalDir"),fcView.resolvePath(new Path("/internalDir")));
}
InternalCallVerifier BooleanVerifier
/**
 * Test modify operations (create, mkdir, delete, etc)
 * on the mount file system where the pathname references through
 * the mount points. Hence these operation will modify the target
 * file system.
 * Verify the operation via mountfs (ie fc) and *also* via the
 * target file system (ie fclocal) that the mount link points-to.
 */
@Test public void testOperationsThroughMountLinks() throws IOException {
// Create/delete a file directly under a mount point.
fileContextTestHelper.createFileNonRecursive(fcView,"/user/foo");
Assert.assertTrue("Create file should be file",isFile(fcView,new Path("/user/foo")));
Assert.assertTrue("Target of created file should be type file",isFile(fcTarget,new Path(targetTestRoot,"user/foo")));
Assert.assertTrue("Delete should succeed",fcView.delete(new Path("/user/foo"),false));
Assert.assertFalse("File should not exist after delete",exists(fcView,new Path("/user/foo")));
Assert.assertFalse("Target File should not exist after delete",exists(fcTarget,new Path(targetTestRoot,"user/foo")));
// Create/delete through a mount link nested inside an internal dir.
fileContextTestHelper.createFileNonRecursive(fcView,"/internalDir/linkToDir2/foo");
Assert.assertTrue("Created file should be type file",isFile(fcView,new Path("/internalDir/linkToDir2/foo")));
Assert.assertTrue("Target of created file should be type file",isFile(fcTarget,new Path(targetTestRoot,"dir2/foo")));
// Typo "suceed" fixed in the message below.
Assert.assertTrue("Delete should succeed",fcView.delete(new Path("/internalDir/linkToDir2/foo"),false));
Assert.assertFalse("File should not exist after deletion",exists(fcView,new Path("/internalDir/linkToDir2/foo")));
Assert.assertFalse("Target should not exist after deletion",exists(fcTarget,new Path(targetTestRoot,"dir2/foo")));
// Two levels of nested mount links, plus recursive create of missing parents.
fileContextTestHelper.createFileNonRecursive(fcView,"/internalDir/internalDir2/linkToDir3/foo");
Assert.assertTrue("Created file should be of type file",isFile(fcView,new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertTrue("Target of created file should also be type file",isFile(fcTarget,new Path(targetTestRoot,"dir3/foo")));
fileContextTestHelper.createFile(fcView,"/internalDir/linkToDir2/missingDir/miss2/foo");
Assert.assertTrue("Created file should be of type file",isFile(fcView,new Path("/internalDir/linkToDir2/missingDir/miss2/foo")));
Assert.assertTrue("Target of created file should also be type file",isFile(fcTarget,new Path(targetTestRoot,"dir2/missingDir/miss2/foo")));
Assert.assertTrue("Delete should succeed",fcView.delete(new Path("/internalDir/internalDir2/linkToDir3/foo"),false));
Assert.assertFalse("Deleted File should not exist",exists(fcView,new Path("/internalDir/internalDir2/linkToDir3/foo")));
Assert.assertFalse("Target of deleted file should not exist",exists(fcTarget,new Path(targetTestRoot,"dir3/foo")));
// mkdir / nested mkdir / delete through a mount point.
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,"/user/dirX"),FileContext.DEFAULT_PERM,false);
Assert.assertTrue("New dir should be type dir",isDir(fcView,new Path("/user/dirX")));
Assert.assertTrue("Target of new dir should be of type dir",isDir(fcTarget,new Path(targetTestRoot,"user/dirX")));
fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView,"/user/dirX/dirY"),FileContext.DEFAULT_PERM,false);
Assert.assertTrue("New dir should be type dir",isDir(fcView,new Path("/user/dirX/dirY")));
Assert.assertTrue("Target of new dir should be of type dir",isDir(fcTarget,new Path(targetTestRoot,"user/dirX/dirY")));
Assert.assertTrue("Delete should succeed",fcView.delete(new Path("/user/dirX/dirY"),false));
Assert.assertFalse("Deleted File should not exist",exists(fcView,new Path("/user/dirX/dirY")));
Assert.assertFalse("Deleted Target should not exist",exists(fcTarget,new Path(targetTestRoot,"user/dirX/dirY")));
Assert.assertTrue("Delete should succeed",fcView.delete(new Path("/user/dirX"),false));
Assert.assertFalse("Deleted File should not exist",exists(fcView,new Path("/user/dirX")));
Assert.assertFalse("Deleted Target should not exist",exists(fcTarget,new Path(targetTestRoot,"user/dirX")));
// Rename of a file within the same mount point.
fileContextTestHelper.createFile(fcView,"/user/foo");
fcView.rename(new Path("/user/foo"),new Path("/user/fooBar"));
Assert.assertFalse("Renamed src should not exist",exists(fcView,new Path("/user/foo")));
Assert.assertFalse(exists(fcTarget,new Path(targetTestRoot,"user/foo")));
Assert.assertTrue(isFile(fcView,fileContextTestHelper.getTestRootPath(fcView,"/user/fooBar")));
Assert.assertTrue(isFile(fcTarget,new Path(targetTestRoot,"user/fooBar")));
// Rename of a directory within the same mount point.
fcView.mkdir(new Path("/user/dirFoo"),FileContext.DEFAULT_PERM,false);
fcView.rename(new Path("/user/dirFoo"),new Path("/user/dirFooBar"));
Assert.assertFalse("Renamed src should not exist",exists(fcView,new Path("/user/dirFoo")));
Assert.assertFalse("Renamed src should not exist in target",exists(fcTarget,new Path(targetTestRoot,"user/dirFoo")));
Assert.assertTrue("Renamed dest should exist as dir",isDir(fcView,fileContextTestHelper.getTestRootPath(fcView,"/user/dirFooBar")));
Assert.assertTrue("Renamed dest should exist as dir in target",isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
// A dir created under the root-mapped mount should show up when listing it.
fcView.mkdir(new Path("/targetRoot/dirFoo"),FileContext.DEFAULT_PERM,false);
Assert.assertTrue(exists(fcView,new Path("/targetRoot/dirFoo")));
boolean dirFooPresent=false;
// Type argument added: with the raw RemoteIterator, next() returns Object
// and the assignment to FileStatus below would not compile.
RemoteIterator<FileStatus> dirContents=fcView.listStatus(new Path("/targetRoot/"));
while (dirContents.hasNext()) {
FileStatus fileStatus=dirContents.next();
if (fileStatus.getPath().getName().equals("dirFoo")) {
dirFooPresent=true;
}
}
Assert.assertTrue(dirFooPresent);
}
InternalCallVerifier EqualityVerifier
/**
 * Sanity checks via FileContext: the default fs URI is the viewfs URI,
 * working/home directory are the user's home, and qualification is rooted
 * at the viewfs URI.
 */
@Test public void testBasicPaths(){
  // The same home path is compared against both working and home directory.
  final Path homePath = new Path("/user/" + System.getProperty("user.name"));
  Assert.assertEquals(FsConstants.VIEWFS_URI, fcView.getDefaultFileSystem().getUri());
  Assert.assertEquals(fcView.makeQualified(homePath), fcView.getWorkingDirectory());
  Assert.assertEquals(fcView.makeQualified(homePath), fcView.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(FsConstants.VIEWFS_URI, null),
      fcView.makeQualified(new Path("/foo/bar")));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test "readOps" (e.g. list, listStatus)
 * on internal dirs of mount table
 * These operations should succeed.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
// Root should list 7 entries: mount links show as symlinks,
// internal dirs show as directories.
FileStatus[] dirPaths=fcView.util().listStatus(new Path("/"));
FileStatus fs;
Assert.assertEquals(7,dirPaths.length);
fs=fileContextTestHelper.containsPath(fcView,"/user",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
fs=fileContextTestHelper.containsPath(fcView,"/data",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
fs=fileContextTestHelper.containsPath(fcView,"/internalDir",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
// Even a dangling link (target absent) is listed as a symlink.
fs=fileContextTestHelper.containsPath(fcView,"/danglingLink",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
fs=fileContextTestHelper.containsPath(fcView,"/linkToAFile",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
// Listing an internal dir shows its child internal dir and mount link.
dirPaths=fcView.util().listStatus(new Path("/internalDir"));
Assert.assertEquals(2,dirPaths.length);
fs=fileContextTestHelper.containsPath(fcView,"/internalDir/internalDir2",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory());
fs=fileContextTestHelper.containsPath(fcView,"/internalDir/linkToDir2",dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink",fs.isSymlink());
}
InternalCallVerifier EqualityVerifier
/**
 * Block locations fetched through the FileContext view must match those
 * reported by the underlying target FileContext for the same file.
 */
@Test public void testGetBlockLocations() throws IOException {
  // Create a 10-block (10 x 1024 bytes) file via the target FileContext.
  final Path targetFile = new Path(targetTestRoot, "data/largeFile");
  FileContextTestHelper.createFile(fcTarget, targetFile, 10, 1024);
  final Path viewFile = new Path("/data/largeFile");
  checkFileStatus(fcView, viewFile.toString(), fileType.isFile);
  // Query slightly past EOF to cover the whole file.
  final long rangeLength = 10240 + 100;
  BlockLocation[] viewLocations = fcView.getFileBlockLocations(viewFile, 0, rangeLength);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewLocations.length);
  BlockLocation[] targetLocations = fcTarget.getFileBlockLocations(targetFile, 0, rangeLength);
  compareBLs(viewLocations, targetLocations);
  // Repeat the lookup and compare again, as the original test did.
  fcView.getFileBlockLocations(viewFile, 0, rangeLength);
  targetLocations = fcTarget.getFileBlockLocations(targetFile, 0, rangeLength);
  compareBLs(viewLocations, targetLocations);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that, when the callback fails to enter active state,
 * the elector rejoins the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActive() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
// No retry sleep has happened yet.
Assert.assertEquals(0,elector.sleptFor);
// Make the app's becomeActive() callback fail.
Mockito.doThrow(new ServiceFailedException("failed to become active")).when(mockApp).becomeActive();
// Simulate winning the lock-znode creation; the becomeActive failure
// should make the elector quit and rejoin the election.
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockApp).becomeActive();
// create() called twice: the initial join plus the rejoin after failure.
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// The elector must have slept before rejoining.
Assert.assertTrue(elector.sleptFor > 0);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that, when the callback fails to enter active state, after
 * a ZK disconnect (i.e from the StatCallback), that the elector rejoins
 * the election after sleeping for a short period.
 */
@Test public void testFailToBecomeActiveAfterZKDisconnect() throws Exception {
mockNoPriorActive();
elector.joinElection(data);
Assert.assertEquals(0,elector.sleptFor);
// First create attempt loses the connection; the elector retries create.
elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
// Retry reports NODEEXISTS, so the elector checks the existing node's stat.
elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME);
verifyExistCall(1);
// The existing lock node belongs to our own session (owner == session id 1),
// so the elector treats the lock as held and tries to become active.
Stat stat=new Stat();
stat.setEphemeralOwner(1L);
Mockito.when(mockZK.getSessionId()).thenReturn(1L);
Mockito.doThrow(new ServiceFailedException("fail to become active")).when(mockApp).becomeActive();
elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,stat);
Mockito.verify(mockApp,Mockito.times(1)).becomeActive();
// Third create call: the rejoin after the becomeActive failure.
Mockito.verify(mockZK,Mockito.times(3)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK);
Assert.assertEquals(2,count);
// The elector must have slept before rejoining.
Assert.assertTrue(elector.sleptFor > 0);
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the proper state is propagated when the health monitor
 * sees an uncaught exception in its thread.
 */
@Test(timeout=15000) public void testHealthMonitorDies() throws Exception {
LOG.info("Mocking RTE in health monitor, waiting for FAILED");
// Flags set on the test harness: presumably throwOOMEOnCreate makes proxy
// creation throw and actUnreachable makes the service unreachable — confirm
// against the enclosing test class's setup.
throwOOMEOnCreate=true;
svc.actUnreachable=true;
waitForState(hm,HealthMonitor.State.HEALTH_MONITOR_FAILED);
hm.shutdown();
hm.join();
// The monitor thread must have terminated after the failure.
assertFalse(hm.isAlive());
}
InternalCallVerifier BooleanVerifier
/**
 * Walk the health monitor through its state machine: unhealthy, healthy,
 * not-responding (with proxy re-creation retries), and back to healthy.
 */
@Test(timeout=15000) public void testMonitor() throws Exception {
LOG.info("Mocking bad health check, waiting for UNHEALTHY");
svc.isHealthy=false;
waitForState(hm,HealthMonitor.State.SERVICE_UNHEALTHY);
LOG.info("Returning to healthy state, waiting for HEALTHY");
svc.isHealthy=true;
waitForState(hm,HealthMonitor.State.SERVICE_HEALTHY);
LOG.info("Returning an IOException, as if node went down");
int countBefore=createProxyCount.get();
svc.actUnreachable=true;
waitForState(hm,HealthMonitor.State.SERVICE_NOT_RESPONDING);
// While unreachable, the monitor should keep re-creating the proxy;
// wait for at least 3 more attempts.
while (createProxyCount.get() < countBefore + 3) {
Thread.sleep(10);
}
LOG.info("Returning to healthy state, waiting for HEALTHY");
svc.actUnreachable=false;
waitForState(hm,HealthMonitor.State.SERVICE_HEALTHY);
hm.shutdown();
hm.join();
assertFalse(hm.isAlive());
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the exit code of the script determines
 * whether the fencer succeeded or failed
 */
@Test public void testBasicSuccessFailure(){
// exit code 0 -> fencing succeeds.
assertTrue(fencer.tryFence(TEST_TARGET,"echo"));
// non-zero exit code -> fencing fails.
assertFalse(fencer.tryFence(TEST_TARGET,"exit 1"));
// nonexistent command -> fencing fails rather than throwing.
assertFalse(fencer.tryFence(TEST_TARGET,"xxxxxxxxxxxx"));
}
InternalCallVerifier BooleanVerifier
/**
 * Test connecting to a host which definitely won't respond.
 * Make sure that it times out and returns false, but doesn't throw
 * any exception
 */
@Test(timeout=20000) public void testConnectTimeout() throws BadFencingConfigurationException {
  // Use a short (3s) connect timeout so the test finishes well inside
  // its 20s budget.
  final Configuration config = new Configuration();
  config.setInt(SshFenceByTcpPort.CONF_CONNECT_TIMEOUT_KEY, 3000);
  final SshFenceByTcpPort fencer = new SshFenceByTcpPort();
  fencer.setConf(config);
  assertFalse(fencer.tryFence(UNFENCEABLE_TARGET, ""));
}
InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Fencing the reachable test target with the configured SSH identity
 * should succeed. Skipped unless the environment is configured.
 */
@Test(timeout=20000) public void testFence() throws BadFencingConfigurationException {
  // Only runs when the SSH test environment (keyfile, target) is set up.
  Assume.assumeTrue(isConfigured());
  final Configuration config = new Configuration();
  config.set(SshFenceByTcpPort.CONF_IDENTITIES_KEY, TEST_KEYFILE);
  final SshFenceByTcpPort fencer = new SshFenceByTcpPort();
  fencer.setConf(config);
  assertTrue(fencer.tryFence(TEST_TARGET, null));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that the various command lines for formatting the ZK directory
 * function correctly.
 */
@Test(timeout=15000) public void testFormatZK() throws Exception {
DummyHAService svc=cluster.getService(1);
// Running before format: parent znode is missing.
assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,runFC(svc));
// First format succeeds.
assertEquals(0,runFC(svc,"-formatZK"));
// Reformat without -force in non-interactive mode is denied.
assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,runFC(svc,"-formatZK","-nonInteractive"));
// Reformat with -force succeeds.
assertEquals(0,runFC(svc,"-formatZK","-force"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that automatic failover won't run against a target that hasn't
 * explicitly enabled the feature.
 */
@Test(timeout=10000) public void testWontRunWhenAutoFailoverDisabled() throws Exception {
  // Spy on the service and report auto-failover as disabled; both the
  // format path and the run path must then refuse to proceed.
  final DummyHAService svc = Mockito.spy(cluster.getService(1));
  Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();
  assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED, runFC(svc, "-formatZK"));
  assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED, runFC(svc));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that the ZKFC won't run if fencing is not configured for the
 * local service.
 */
@Test(timeout=15000) public void testFencingMustBeConfigured() throws Exception {
DummyHAService svc=Mockito.spy(cluster.getService(0));
// Make the fencing-configuration check fail for this service.
Mockito.doThrow(new BadFencingConfigurationException("no fencing")).when(svc).checkFencingConfigured();
// Formatting does not need fencing and still succeeds...
assertEquals(0,runFC(svc,"-formatZK"));
// ...but actually running the ZKFC must fail with the no-fencer code.
assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER,runFC(svc));
}
InternalCallVerifier EqualityVerifier
/**
 * Gracefully fail over from node 0 to node 1 and back again, verifying
 * no fencing occurs and the active-transition counts are as expected.
 */
@Test(timeout=25000) public void testGracefulFailover() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Fail over 0 -> 1, then back 1 -> 0.
cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover();
cluster.waitForActiveLockHolder(1);
cluster.getService(0).getZKFCProxy(conf,5000).gracefulFailover();
cluster.waitForActiveLockHolder(0);
// Give background activity time to settle before checking counters.
Thread.sleep(10000);
// Graceful failover must never fence either node.
assertEquals(0,cluster.getService(0).fenceCount);
assertEquals(0,cluster.getService(1).fenceCount);
// Node 0 went active twice (initially and on failback); node 1 once.
assertEquals(2,cluster.getService(0).activeTransitionCount);
assertEquals(1,cluster.getService(1).activeTransitionCount);
}
finally {
cluster.stop();
}
}
InternalCallVerifier EqualityVerifier
/**
 * If the old active fails to transition to standby during a graceful
 * failover, it should be fenced.
 */
@Test(timeout=15000) public void testGracefulFailoverFailBecomingStandby() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Make node 0 fail its standby transition, then fail over to node 1.
cluster.setFailToBecomeStandby(0,true);
cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover();
// Node 0 must have been fenced exactly once.
assertEquals(1,cluster.getService(0).fenceCount);
}
finally {
cluster.stop();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Formatting the ZK state for one logical cluster must not disturb the
 * state of another cluster sharing the same ZooKeeper ensemble under a
 * different scope znode.
 */
@Test public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
  final DummyHAService svc = cluster.getService(1);
  // A second ZKFC scoped to a different znode sub-tree simulates an
  // unrelated cluster on the same ensemble.
  final DummyZKFC zkfcInOtherCluster = new DummyZKFC(conf, cluster.getService(1)) {
    @Override protected String getScopeInsideParentNode() {
      return "other-scope";
    }
  };
  assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE, runFC(svc));
  assertEquals(0, runFC(svc, "-formatZK"));
  // The other cluster's scope is still unformatted until it formats itself.
  assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE, zkfcInOtherCluster.run(new String[]{}));
  assertEquals(0, zkfcInOtherCluster.run(new String[]{"-formatZK"}));
  // Our cluster's existing format is still in place, so a non-interactive
  // reformat is denied.
  assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED, runFC(svc, "-formatZK", "-nonInteractive"));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that if ZooKeeper is not running, the correct error
 * code is returned.
 */
@Test(timeout=15000) public void testNoZK() throws Exception {
// Stop the ZK server before starting the failover controller.
stopServer();
DummyHAService svc=cluster.getService(1);
assertEquals(ZKFailoverController.ERR_CODE_NO_ZK,runFC(svc));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * If the failover target fails to become active during a graceful
 * failover, the RPC must fail with a descriptive ServiceFailedException,
 * no node is fenced, and the original node retakes the active lock.
 */
@Test(timeout=15000) public void testGracefulFailoverFailBecomingActive() throws Exception {
try {
cluster.start();
cluster.waitForActiveLockHolder(0);
// Inject a failure into node 1's active transition.
cluster.setFailToBecomeActive(1,true);
try {
cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover();
fail("Did not fail to graceful failover when target failed " + "to become active!");
}
catch ( ServiceFailedException sfe) {
// The exception should name the target and carry the injected cause.
GenericTestUtils.assertExceptionContains("Couldn't make " + cluster.getService(1) + " active",sfe);
GenericTestUtils.assertExceptionContains("injected failure",sfe);
}
// A failed graceful failover must not fence anyone.
assertEquals(0,cluster.getService(0).fenceCount);
assertEquals(0,cluster.getService(1).fenceCount);
// Node 0 should end up holding the active lock again.
cluster.waitForActiveLockHolder(0);
}
finally {
cluster.stop();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test that, when ZooKeeper fails, the system remains in its
 * current state, without triggering any failovers, and without
 * causing the active node to enter standby state.
 */
@Test(timeout=15000) public void testZooKeeperFailure() throws Exception {
try {
cluster.start();
// Remember the ZK session ids so we can verify new sessions were
// established after the restart.
long session0=cluster.getElector(0).getZKSessionIdForTests();
long session1=cluster.getElector(1).getZKSessionIdForTests();
LOG.info("====== Stopping ZK server");
stopServer();
waitForServerDown(hostPort,CONNECTION_TIMEOUT);
LOG.info("====== Waiting for services to enter NEUTRAL mode");
cluster.waitForElectorState(0,ActiveStandbyElector.State.NEUTRAL);
cluster.waitForElectorState(1,ActiveStandbyElector.State.NEUTRAL);
LOG.info("====== Checking that the services didn't change HA state");
// ZK loss must not demote the active or promote the standby.
assertEquals(HAServiceState.ACTIVE,cluster.getService(0).state);
assertEquals(HAServiceState.STANDBY,cluster.getService(1).state);
LOG.info("====== Restarting server");
startServer();
waitForServerUp(hostPort,CONNECTION_TIMEOUT);
// After ZK returns, the electors should reclaim their previous roles.
cluster.waitForElectorState(0,ActiveStandbyElector.State.ACTIVE);
cluster.waitForElectorState(1,ActiveStandbyElector.State.STANDBY);
cluster.waitForHAState(0,HAServiceState.ACTIVE);
cluster.waitForHAState(1,HAServiceState.STANDBY);
// NOTE(review): the original asserts the session ids are EQUAL after
// restart — i.e. the old ZK sessions were resumed, not replaced.
assertEquals(session0,cluster.getElector(0).getZKSessionIdForTests());
assertEquals(session1,cluster.getElector(1).getZKSessionIdForTests());
}
finally {
cluster.stop();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the ZKFC can gracefully cede its active status.
 */
@Test(timeout=15000) public void testCedeActive() throws Exception {
try {
cluster.start();
DummyZKFC zkfc=cluster.getZkfc(0);
assertEquals(ActiveStandbyElector.State.ACTIVE,zkfc.getElectorForTests().getStateForTests());
ZKFCProtocol proxy=zkfc.getLocalTarget().getZKFCProxy(conf,5000);
long st=Time.now();
// Cede active for 3 seconds; the RPC itself must return quickly.
proxy.cedeActive(3000);
long et=Time.now();
assertTrue("RPC to cedeActive took " + (et - st) + " ms",et - st < 1000);
// While ceded, the elector leaves the election entirely (INIT state).
assertEquals(ActiveStandbyElector.State.INIT,zkfc.getElectorForTests().getStateForTests());
cluster.waitForElectorState(0,ActiveStandbyElector.State.STANDBY);
long et2=Time.now();
// Rejoining should take roughly the full 3-second cede period.
assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) + "ms before rejoining.",et2 - et > 2800);
}
finally {
cluster.stop();
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test that, if ACLs are specified in the configuration, that
 * it sets the ACLs when formatting the parent node.
 */
@Test(timeout=15000) public void testFormatSetsAcls() throws Exception {
DummyHAService svc=cluster.getService(1);
assertEquals(0,runFC(svc,"-formatZK"));
// A client without credentials must be denied reads on the parent znode.
ZooKeeper otherClient=createClient();
try {
Stat stat=new Stat();
otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,false,stat);
fail("Was able to read data without authenticating!");
}
catch ( KeeperException.NoAuthException nae) {
// Expected: the ACLs set during format deny unauthenticated access.
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Abandoning the last block of a file under construction should drop it:
 * after a NameNode restart the file reports one fewer block than before.
 */
@Test public void testAbandonBlock() throws IOException {
  final String src = FILE_NAME_PREFIX + "foo";
  // Write two 512-byte blocks' worth of data without closing the stream.
  final FSDataOutputStream out = fs.create(new Path(src), true, 4096, (short) 1, 512L);
  for (int i = 0; i < 1024; i++) {
    out.write(123);
  }
  out.hflush();
  final long fileId = ((DFSOutputStream) out.getWrappedStream()).getFileId();
  final DFSClient client = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks = client.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  final int originalBlockCount = blocks.locatedBlockCount();
  final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
  // Abandon the last block twice, as the original test did — presumably to
  // check that a repeated abandon of the same block is tolerated.
  client.getNamenode().abandonBlock(lastBlock.getBlock(), fileId, src, client.clientName);
  client.getNamenode().abandonBlock(lastBlock.getBlock(), fileId, src, client.clientName);
  out.close();
  // Restart the NameNode to force the abandoned state through the edit log.
  cluster.restartNameNode();
  blocks = client.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  Assert.assertEquals("Blocks " + lastBlock + " has not been abandoned.",
      originalBlockCount, blocks.locatedBlockCount() + 1);
}
InternalCallVerifier EqualityVerifier
/**
 * Verify that the balancer bandwidth can be changed dynamically on live
 * DataNodes, and that a subsequent value of 0 leaves the previous
 * bandwidth in effect.
 */
@Test public void testBalancerBandwidth() throws Exception {
conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,DEFAULT_BANDWIDTH);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
try {
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
ArrayList datanodes=cluster.getDataNodes();
// Both DataNodes start with the configured default bandwidth.
assertEquals(DEFAULT_BANDWIDTH,(long)datanodes.get(0).getBalancerBandwidth());
assertEquals(DEFAULT_BANDWIDTH,(long)datanodes.get(1).getBalancerBandwidth());
long newBandwidth=12 * DEFAULT_BANDWIDTH;
fs.setBalancerBandwidth(newBandwidth);
// Sleep to let the bandwidth update propagate via heartbeats.
try {
Thread.sleep(5000);
}
catch ( Exception e) {
}
assertEquals(newBandwidth,(long)datanodes.get(0).getBalancerBandwidth());
assertEquals(newBandwidth,(long)datanodes.get(1).getBalancerBandwidth());
fs.setBalancerBandwidth(0);
try {
Thread.sleep(5000);
}
catch ( Exception e) {
}
// The asserts still expect newBandwidth: setting 0 apparently leaves
// the previous value in place — confirm against the DataNode's
// bandwidth-update handling.
assertEquals(newBandwidth,(long)datanodes.get(0).getBalancerBandwidth());
assertEquals(newBandwidth,(long)datanodes.get(1).getBalancerBandwidth());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Test shutting down the ShortCircuitCache while there are things in it.
 */
@Test public void testShortCircuitCacheShutdown() throws Exception {
  TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
  Configuration conf=createShortCircuitConf("testShortCircuitCacheShutdown",sockDir);
  conf.set(DFS_CLIENT_CONTEXT,"testShortCircuitCacheShutdown");
  Configuration serverConf=new Configuration(conf);
  DFSInputStream.tcpReadsDisabledForTesting=true;
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  // try/finally added: the original leaked the mini-cluster (and socket dir)
  // whenever an assertion or read failed mid-test.
  try {
    cluster.waitActive();
    final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf);
    final String TEST_FILE="/test_file";
    final int TEST_FILE_LEN=4000;
    final int SEED=0xFADEC;
    DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
    // Read back and verify the deterministic seeded contents.
    byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE));
    byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents,expected));
    // Closing the cache must also close its domain socket watcher.
    final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache();
    cache.close();
    Assert.assertTrue(cache.getDfsClientShmManager().getDomainSocketWatcher().isClosed());
  } finally {
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * If we have a UNIX domain socket configured,
 * and we have dfs.client.domain.socket.data.traffic set to true,
 * and short-circuit access fails, we should still be able to pass
 * data traffic over the UNIX domain socket. Test this.
 */
@Test(timeout=60000) public void testFallbackFromShortCircuitToUnixDomainTraffic() throws Exception {
  DFSInputStream.tcpReadsDisabledForTesting=true;
  TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
  Configuration clientConf=createShortCircuitConf("testFallbackFromShortCircuitToUnixDomainTraffic",sockDir);
  clientConf.set(DFS_CLIENT_CONTEXT,"testFallbackFromShortCircuitToUnixDomainTraffic_clientContext");
  clientConf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,true);
  // The server disables short-circuit reads, forcing the client to fall
  // back to domain-socket data traffic (TCP reads are disabled above).
  Configuration serverConf=new Configuration(clientConf);
  serverConf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY,false);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  // try/finally added: the original leaked the mini-cluster and socket dir
  // whenever the read or assertion failed mid-test.
  try {
    cluster.waitActive();
    FileSystem dfs=FileSystem.get(cluster.getURI(0),clientConf);
    String TEST_FILE="/test_file";
    final int TEST_FILE_LEN=8193;
    final int SEED=0xFADED;
    DFSTestUtil.createFile(dfs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
    // Read back and verify the deterministic seeded contents.
    byte contents[]=DFSTestUtil.readFileBuffer(dfs,new Path(TEST_FILE));
    byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents,expected));
  } finally {
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test public void testShortCircuitReadFromServerWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf =
      createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  // A check interval of 0 disables the shared-memory watcher on the
  // server side, simulating a server without shm support.
  serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    clientConf.set(DFS_CLIENT_CONTEXT,
        "testShortCircuitReadFromServerWithoutShm_clientContext");
    final DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode =
        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
    // After the read, the shm manager should have marked this datanode
    // as "shm disabled", with no shared-memory segments at all.
    cache.getDfsClientShmManager().visit(new Visitor() {
      @Override public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
          throws IOException {
        Assert.assertEquals(1, info.size());
        PerDatanodeVisitorInfo vinfo = info.get(datanode);
        Assert.assertTrue(vinfo.disabled);
        Assert.assertEquals(0, vinfo.full.size());
        Assert.assertEquals(0, vinfo.notFull.size());
      }
    });
  } finally {
    // Original leaked the cluster on failure and never closed sockDir.
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the case where we have multiple threads waiting on the
 * ShortCircuitCache delivering a certain ShortCircuitReplica.
 * In this case, there should only be one call to
 * createShortCircuitReplicaInfo. This one replica should be shared
 * by all threads.
 */
@Test(timeout=60000) public void testMultipleWaitersOnShortCircuitCache() throws Exception {
  final CountDownLatch latch = new CountDownLatch(1);
  final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
  final AtomicBoolean testFailed = new AtomicBoolean(false);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  // Block every replica creation on the latch, and fail the test if
  // creation is attempted more than once.
  BlockReaderFactory.createShortCircuitReplicaInfoCallback =
      new ShortCircuitCache.ShortCircuitReplicaCreator() {
    @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
      Uninterruptibles.awaitUninterruptibly(latch);
      if (!creationIsBlocked.compareAndSet(true, false)) {
        Assert.fail("there were multiple calls to "
            + "createShortCircuitReplicaInfo. Only one was expected.");
      }
      return null;
    }
  };
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf =
      createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {
      @Override public void run() {
        try {
          byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
          // By the time any read completes, the latch must have been
          // released and the single creation must have happened.
          Assert.assertFalse(creationIsBlocked.get());
          byte expected[] =
              DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
          Assert.assertTrue(Arrays.equals(contents, expected));
        } catch (Throwable e) {
          // Failures on reader threads are recorded, then re-checked on
          // the main thread at the end of the test.
          LOG.error("readerRunnable error", e);
          testFailed.set(true);
        }
      }
    };
    Thread threads[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
      threads[i] = new Thread(readerRunnable);
      threads[i].start();
    }
    // Give all readers time to pile up behind the latch.
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
      Uninterruptibles.joinUninterruptibly(threads[i]);
    }
  } finally {
    // Unblock any still-waiting reader threads (a second countDown is a
    // no-op), uninstall the static test callback (default is null) so it
    // cannot leak into other tests, and always release the cluster and
    // socket directory — the original did none of this on failure.
    latch.countDown();
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = null;
    cluster.shutdown();
    sockDir.close();
  }
  Assert.assertFalse(testFailed.get());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test that a client which does not support short-circuit reads using
 * shared memory can talk with a server which supports it.
 */
@Test public void testShortCircuitReadFromClientWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf =
      createShortCircuitConf("testShortCircuitReadWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    // A check interval of 0 disables shared memory on the client side.
    clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    clientConf.set(DFS_CLIENT_CONTEXT,
        "testShortCircuitReadFromClientWithoutShm_clientContext");
    final DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    // With shm disabled on the client, no shm manager should be created.
    final ShortCircuitCache cache = fs.dfs.getClientContext().getShortCircuitCache();
    Assert.assertNull(cache.getDfsClientShmManager());
  } finally {
    // Original leaked the cluster on failure and never closed sockDir.
    cluster.shutdown();
    sockDir.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that, in the case of an error, the position and limit of a ByteBuffer
 * are left unchanged. This is not mandated by ByteBufferReadable, but clients
 * of this class might immediately issue a retry on failure, so it's polite.
 */
@Test public void testStablePositionAfterCorruptRead() throws Exception {
  final short REPL_FACTOR = 1;
  final long FILE_LENGTH = 512L;
  HdfsConfiguration conf = getConfiguration(null);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/corrupted");
    DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    // Corrupt every replica so any read must hit a ChecksumException.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
    // Case 1: a fresh buffer must keep position 0 and limit == capacity
    // across the failed read.
    FSDataInputStream dis = cluster.getFileSystem().open(path);
    ByteBuffer buf = ByteBuffer.allocateDirect((int) FILE_LENGTH);
    boolean sawException = false;
    try {
      dis.read(buf);
    } catch (ChecksumException ex) {
      sawException = true;
    }
    assertTrue(sawException);
    assertEquals(0, buf.position());
    assertEquals(buf.capacity(), buf.limit());
    dis.close();
    // Case 2: non-default position/limit must also survive the failure.
    dis = cluster.getFileSystem().open(path);
    buf.position(3);
    buf.limit(25);
    sawException = false;
    try {
      dis.read(buf);
    } catch (ChecksumException ex) {
      sawException = true;
    }
    assertTrue(sawException);
    assertEquals(3, buf.position());
    assertEquals(25, buf.limit());
    dis.close();
  } finally {
    // Original never closed the input streams and leaked the cluster
    // when an assertion failed.
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests that DatanodeDescriptor#getBlocksScheduled() counts a block that
 * is being written but not yet completed, and drops back to zero once the
 * writer closes the file.
 */
@Test public void testBlocksScheduledCounter() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Write some data and hflush without closing, so one block remains
    // scheduled (allocated but not completed) on the datanode.
    FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
    for (int i = 0; i < 1024; i++) {
      out.write(i);
    }
    out.hflush();
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    final DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    dm.fetchDatanodes(dnList, dnList, false);
    DatanodeDescriptor dn = dnList.get(0);
    assertEquals(1, dn.getBlocksScheduled());
    // Closing the file completes the block and clears the counter.
    out.close();
    assertEquals(0, dn.getBlocksScheduled());
  } finally {
    // Original never shut the cluster down at all, leaking it into
    // subsequent tests.
    cluster.shutdown();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test recovery on restart OOB message. It also tests the delivery of
 * OOB ack originating from the primary datanode. Since there is only
 * one node in the cluster, failure of restart-recovery will fail the
 * test.
 */
@Test public void testPipelineRecoveryOnOOB() throws Exception {
Configuration conf=new HdfsConfiguration();
// Allow the client to wait up to 15 seconds for the restarting
// datanode to come back before giving up on the pipeline.
conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,"15");
MiniDFSCluster cluster=null;
try {
int numDataNodes=1;
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
FileSystem fileSys=cluster.getFileSystem();
Path file=new Path("dataprotocol2.dat");
DFSTestUtil.createFile(fileSys,file,10240L,(short)1,0L);
// Open an append pipeline and leave it mid-write so the restart OOB
// ack has a live pipeline to travel through.
DFSOutputStream out=(DFSOutputStream)(fileSys.append(file).getWrappedStream());
out.write(1);
out.hflush();
DFSAdmin dfsadmin=new DFSAdmin(conf);
DataNode dn=cluster.getDataNodes().get(0);
final String dnAddr=dn.getDatanodeId().getIpcAddr(false);
// Shut down the only datanode "for upgrade": this is what triggers
// the restart OOB message to the writing client.
final String[] args1={"-shutdownDatanode",dnAddr,"upgrade"};
Assert.assertEquals(0,dfsadmin.run(args1));
// Give the datanode time to actually go down before restarting it.
Thread.sleep(4000);
cluster.restartDataNode(0,true);
// close() forces the client to finish the write through the
// recovered pipeline; it fails if restart-recovery did not work.
out.close();
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test restart timeout: when the datanodes of a pipeline are shut down
 * "for upgrade" and do not come back within the configured restart
 * timeout, the writer's close() must eventually fail.
 */
@Test public void testPipelineRecoveryOnRestartFailure() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Short restart timeout (5s) so the recovery attempt expires quickly.
  conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
  MiniDFSCluster cluster = null;
  try {
    int numDataNodes = 2;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    Path file = new Path("dataprotocol3.dat");
    DFSTestUtil.createFile(fileSys, file, 10240L, (short) 2, 0L);
    DFSOutputStream out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
    out.write(1);
    out.hflush();
    DFSAdmin dfsadmin = new DFSAdmin(conf);
    DataNode dn = cluster.getDataNodes().get(0);
    final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
    // Take down the first datanode "for upgrade"; with one datanode
    // still alive the close below is expected to succeed.
    final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade"};
    Assert.assertEquals(0, dfsadmin.run(args1));
    Thread.sleep(4000);
    out.close();
    // Append again, then take down the second datanode as well; with
    // both pipeline nodes gone and not restarted, close() must fail
    // once the 5s restart timeout expires.
    out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
    out.write(1);
    out.hflush();
    dn = cluster.getDataNodes().get(1);
    final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
    final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade"};
    Assert.assertEquals(0, dfsadmin.run(args2));
    Thread.sleep(4000);
    try {
      out.close();
      // The original used "assert false", which is silently skipped
      // unless the JVM runs with -ea; Assert.fail() always reports.
      Assert.fail("close() should have failed: datanode restart timed out");
    } catch (IOException ioe) {
      // expected: pipeline recovery timed out
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Read a file served entirely from one DN. Seek around and read from
 * different offsets. And verify that they all use the same socket.
 * @throws Exception
 */
@Test public void testReadFromOneDN() throws Exception {
HdfsConfiguration configuration=new HdfsConfiguration();
final String contextName="testReadFromOneDNContext";
configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,contextName);
// Very long socket timeout so the cached peer is never evicted for
// inactivity while the test runs.
configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,100000000L);
BlockReaderTestUtil util=new BlockReaderTestUtil(1,configuration);
final Path testFile=new Path("/testConnCache.dat");
byte authenticData[]=util.writeFile(testFile,FILE_SIZE / 1024);
DFSClient client=new DFSClient(new InetSocketAddress("localhost",util.getCluster().getNameNodePort()),util.getConf());
// NOTE(review): cacheContext is never read afterwards; presumably
// ClientContext.get() is invoked for its context-creation side effect
// only — confirm before removing this line.
ClientContext cacheContext=ClientContext.get(contextName,client.getConf());
DFSInputStream in=client.open(testFile.toString());
LOG.info("opened " + testFile.toString());
byte[] dataBuf=new byte[BLOCK_SIZE];
// Read from scattered offsets (the semantics of the -1 offset are
// defined by this class's pread() helper, not visible here); every
// read should reuse the single cached peer.
pread(in,0,dataBuf,0,dataBuf.length,authenticData);
pread(in,FILE_SIZE - dataBuf.length,dataBuf,0,dataBuf.length,authenticData);
pread(in,1024,dataBuf,0,dataBuf.length,authenticData);
pread(in,-1,dataBuf,0,dataBuf.length,authenticData);
pread(in,64,dataBuf,0,dataBuf.length / 2,authenticData);
in.close();
client.close();
// All reads above must have shared exactly one cached socket.
Assert.assertEquals(1,ClientContext.getFromConf(configuration).getPeerCache().size());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests reconfiguring a datanode's bind addresses between restarts:
 * unset address keys leave a restarted datanode on the loopback address,
 * while explicit wildcard values ("0.0.0.0:0") bind it to the wildcard
 * address.
 */
@Test public void testDFSAddressConfig() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    ArrayList<DataNode> dns = cluster.getDataNodes();
    DataNode dn = dns.get(0);
    String selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    // The initial datanode binds to loopback.
    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
    // Stop every datanode before reconfiguring the address keys.
    for (int i = 0; i < dns.size(); i++) {
      DataNodeProperties dnp = cluster.stopDataNode(i);
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }
    // With the address keys unset, a restarted datanode should still
    // come up on the loopback address.
    conf.unset(DFS_DATANODE_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);
    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
        null, null, null, false, true);
    dns = cluster.getDataNodes();
    dn = dns.get(0);
    selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
    for (int i = 0; i < dns.size(); i++) {
      DataNodeProperties dnp = cluster.stopDataNode(i);
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }
    // Explicit wildcard addresses must make the datanode bind 0.0.0.0.
    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
        null, null, null, false, true);
    dns = cluster.getDataNodes();
    dn = dns.get(0);
    selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
  } finally {
    // Original only shut down on the success path.
    cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests that datanodes excluded by a writer are forgiven once
 * dfs.client.write.exclude.nodes.cache.expiry.interval elapses, so a
 * later write can use them again.
 */
@Test(timeout=60000) public void testExcludedNodesForgiveness() throws IOException {
  // Forgive excluded nodes after 2.5 seconds.
  conf.setLong(DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL, 2500);
  conf.setInt("io.bytes.per.checksum", 512);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  // (Removed an unused local that aliased cluster.dataNodes.)
  FileSystem fs = cluster.getFileSystem();
  Path filePath = new Path("/testForgivingExcludedNodes");
  byte[] bytes = new byte[256];
  Arrays.fill(bytes, (byte) '0');
  // 256 bytes are sent each packet, and the last packet will contain
  // 512 bytes total before the writes below finish.
  FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);
  out.write(bytes);
  out.write(bytes);
  out.hflush();
  // Kill two of the three datanodes so the writer excludes them.
  DataNodeProperties two = cluster.stopDataNode(2);
  DataNodeProperties one = cluster.stopDataNode(1);
  out.write(bytes);
  out.write(bytes);
  out.hflush();
  // Bring the excluded nodes back and wait past the expiry interval.
  Assert.assertTrue(cluster.restartDataNode(one, true));
  Assert.assertTrue(cluster.restartDataNode(two, true));
  cluster.waitActive();
  ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);
  // Kill the one node that was never excluded; the write can now only
  // complete if the previously excluded nodes have been forgiven.
  cluster.stopDataNode(0);
  try {
    out.write(bytes);
    out.hflush();
    out.close();
  } catch (Exception e) {
    fail("Excluded DataNodes should be forgiven after a while and "
        + "not cause file writing exception of: '" + e.getMessage() + "'");
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test to verify IPFailoverProxyProvider is not requiring logical URI.
 */
@Test public void testIPFailoverProxyProviderLogicalUri() throws Exception {
  // Point the failover proxy provider key for this NameNode's host at
  // the IP-based provider, then verify no logical URI is required.
  URI nnUri = cluster.getURI(0);
  Configuration haConfig = new HdfsConfiguration(conf);
  String providerKey =
      DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + nnUri.getHost();
  haConfig.set(providerKey, IPFailoverProxyProvider.class.getName());
  assertFalse("IPFailoverProxyProvider should not use logical URI.",
      HAUtil.useLogicalUri(haConfig, nnUri));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
  // JUnit convention is assertEquals(expected, actual); the original had
  // the arguments reversed, producing misleading failure messages.
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());
  // Kill the active NN and promote the standby; the client must fail
  // over transparently and still see the file.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());
  // A logical URI carrying an explicit default port must also resolve.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster) + ":"
      + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));
  fs.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test public void testWrappedFailoverProxyProvider() throws Exception {
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");
  // Disable IP-based token service names only for the duration of this
  // check, then restore the documented default (true) so the static
  // setting cannot leak into other tests — the original never undid it.
  SecurityUtil.setTokenServiceUseIp(false);
  try {
    assertTrue("Legacy proxy providers should use logical URI.",
        HAUtil.useLogicalUri(config, p.toUri()));
  } finally {
    SecurityUtil.setTokenServiceUseIp(true);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that getAdditionalBlock() and close() are idempotent. This allows
 * a client to safely retry a call and still produce a correct
 * file. See HDFS-3031.
 */
@Test public void testIdempotentAllocateBlockAndClose() throws Exception {
final String src="/testIdempotentAllocateBlock";
Path file=new Path(src);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,4096);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Spy on the NameNode RPC so every addBlock()/complete() call can be
// issued twice, simulating a client retrying the same RPC.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
doAnswer(new Answer(){
// Calls addBlock() twice per client invocation. The retried call
// must return the same last block and must not grow the file's
// block list — that is what makes the RPC idempotent.
@Override public LocatedBlock answer( InvocationOnMock invocation) throws Throwable {
LocatedBlock ret=(LocatedBlock)invocation.callRealMethod();
LocatedBlocks lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE);
int blockCount=lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(),ret.getBlock());
// Fake the retry: invoke the real method a second time.
LocatedBlock ret2=(LocatedBlock)invocation.callRealMethod();
lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE);
int blockCount2=lb.getLocatedBlocks().size();
assertEquals(lb.getLastLocatedBlock().getBlock(),ret2.getBlock());
// The retried allocation must not have added a new block.
assertEquals(blockCount,blockCount2);
return ret2;
}
}
).when(spyNN).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any());
doAnswer(new Answer(){
// Calls complete() twice when the first real call succeeds; the
// faked retry must succeed as well (idempotence of close()).
@Override public Boolean answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Called complete(: " + Joiner.on(",").join(invocation.getArguments()) + ")");
if (!(Boolean)invocation.callRealMethod()) {
LOG.info("Complete call returned false, not faking a retry RPC");
return false;
}
try {
boolean ret=(Boolean)invocation.callRealMethod();
LOG.info("Complete call returned true, faked second RPC. " + "Returned: " + ret);
return ret;
}
catch ( Throwable t) {
LOG.error("Idempotent retry threw exception",t);
throw t;
}
}
}
).when(spyNN).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong());
// Write through the spied client; every addBlock/complete on this
// stream goes through the doubled-up answers above.
OutputStream stm=client.create(file.toString(),true);
try {
AppendTestUtil.write(stm,0,10000);
stm.close();
stm=null;
}
finally {
// No-op when close() succeeded (stm is null); cleans up otherwise.
IOUtils.cleanup(LOG,stm);
}
// Sanity-check that the spies were actually exercised, and that the
// resulting file contents are correct despite the fake retries.
Mockito.verify(spyNN,Mockito.atLeastOnce()).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any());
Mockito.verify(spyNN,Mockito.atLeastOnce()).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong());
AppendTestUtil.check(fs,file,10000);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test that checksum failures are recovered from by the next read on the same
 * DFSInputStream. Corruption information is not persisted from read call to
 * read call, so the client should expect consecutive calls to behave the same
 * way. See HDFS-3067.
 */
@Test public void testRetryOnChecksumFailure() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    final short REPL_FACTOR = 1;
    final long FILE_LENGTH = 512L;
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/corrupted");
    DFSTestUtil.createFile(fs, path, FILE_LENGTH, REPL_FACTOR, 12345L);
    DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
    // Corrupt every replica so each read must hit the checksum error.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
    int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
    InetSocketAddress nnAddr =
        new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(nnAddr, conf);
    DFSInputStream dis = client.open(path.toString());
    byte[] arr = new byte[(int) FILE_LENGTH];
    // Two consecutive reads must fail the same way: corruption info is
    // tracked per read operation, not per stream.
    for (int i = 0; i < 2; ++i) {
      try {
        dis.read(arr, 0, (int) FILE_LENGTH);
        fail("Expected ChecksumException not thrown");
      } catch (IOException ex) {
        // Narrowed from the original bare "catch (Exception)", which
        // would also swallow unrelated runtime errors; still verify it
        // is genuinely a checksum problem.
        GenericTestUtils.assertExceptionContains("Checksum error", ex);
      }
    }
    // Original never closed either of these.
    dis.close();
    client.close();
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that a file checksum stays stable when one of the datanodes
 * holding its first block is stopped: the checksum recomputed from the
 * remaining replicas must match the original.
 */
@Test public void testGetFileChecksum() throws Exception {
  final String f = "/testGetFileChecksum";
  final Path p = new Path(f);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    // Create a 1MB file with replication 3 and grab its checksum.
    DFSTestUtil.createFile(fs, p, 1L << 20, (short) 3, 20100402L);
    final FileChecksum cs1 = fs.getFileChecksum(p);
    assertNotNull("Expected a checksum for the newly created file", cs1);
    // Stop the first datanode hosting the first block.
    final List<LocatedBlock> locatedblocks = DFSClient.callGetBlockLocations(
        cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE).getLocatedBlocks();
    final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
    cluster.stopDataNode(first.getXferAddr());
    // The checksum computed from the remaining replicas must match.
    final FileChecksum cs2 = fs.getFileChecksum(p);
    assertEquals(cs1, cs2);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * This tests that DFSInputStream failures are counted for a given read
 * operation, and not over the lifetime of the stream. It is a regression
 * test for HDFS-127.
 */
@Test public void testFailuresArePerOperation() throws Exception {
long fileSize=4096;
Path file=new Path("/testFile");
// Shrink the retry window and socket timeout so the induced failures
// resolve quickly.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,2 * 1000);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Spy on the NameNode RPC so getBlockLocations() can be made to fail
// a controlled number of times (FailNTimesAnswer is defined elsewhere
// in this class).
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient client=new DFSClient(null,spyNN,conf,null);
int maxBlockAcquires=client.getMaxBlockAcquireFailures();
assertTrue(maxBlockAcquires > 0);
DFSTestUtil.createFile(fs,file,fileSize,(short)1,12345L);
// One more failure than the client tolerates: the read must fail.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
try {
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
fail("Didn't get exception");
}
catch ( IOException ioe) {
DFSClient.LOG.info("Got expected exception",ioe);
}
// Exactly the tolerated number of failures: the read must succeed.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
IOUtils.copyBytes(client.open(file.toString()),new IOUtils.NullOutputStream(),conf,true);
DFSClient.LOG.info("Starting test case for failure reset");
// Core regression check: exhaust the failure budget on one read...
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
DFSInputStream is=client.open(file.toString());
byte buf[]=new byte[10];
IOUtils.readFully(is,buf,0,buf.length);
DFSClient.LOG.info("First read successful after some failures.");
// ...then inject the same number of failures again on the SAME
// stream; the second read only succeeds if the failure count was
// reset per operation rather than accumulated per stream.
doAnswer(new FailNTimesAnswer(preSpyNN,maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(),anyLong(),anyLong());
is.openInfo();
is.seek(0);
IOUtils.readFully(is,buf,0,buf.length);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests mkdir will not create directory when parent is missing.
 */
@Test public void testMkdir() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    // A directory under an existing parent is created normally.
    assertTrue(dfs.mkdir(new Path("/mkdir-" + Time.now()), FsPermission.getDefault()));
    // Case 1: parent path exists but is a regular file.
    String filePath = "/mkdir-file-" + Time.now();
    DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
    IOException caught = null;
    try {
      dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
    } catch (IOException e) {
      caught = e;
    }
    assertTrue("Create a directory when parent dir exists as file using"
        + " mkdir() should throw ParentNotDirectoryException ",
        caught != null && caught instanceof ParentNotDirectoryException);
    // Case 2: parent path does not exist at all.
    caught = null;
    try {
      dfs.mkdir(new Path("/non-exist/mkdir-" + Time.now()), FsPermission.getDefault());
    } catch (IOException e) {
      caught = e;
    }
    assertTrue("Create a directory in a non-exist parent dir using"
        + " mkdir() should throw FileNotFoundException ",
        caught != null && caught instanceof FileNotFoundException);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Tests mkdirs can create a directory that does not exist and will
 * not create a subdirectory off a file. Regression test for HADOOP-281.
 */
@Test public void testDFSMkdirs() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    // mkdirs is idempotent: creating an existing directory succeeds.
    Path myPath = new Path("/test/mkdirs");
    assertTrue(fileSys.mkdirs(myPath));
    assertTrue(fileSys.exists(myPath));
    assertTrue(fileSys.mkdirs(myPath));
    // Creating a directory underneath an existing *file* must fail.
    Path myFile = new Path("/test/mkdirs/myFile");
    DFSTestUtil.writeFile(fileSys, myFile, "hello world");
    Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
    // Primitive boolean; the original used the boxed Boolean wrapper
    // for no reason.
    boolean exist = true;
    try {
      fileSys.mkdirs(myIllegalPath);
    } catch (IOException e) {
      exist = false;
    }
    assertFalse(exist);
    assertFalse(fileSys.exists(myIllegalPath));
    fileSys.delete(myFile, true);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The close() method of DFSOutputStream should never throw the same exception
 * twice. See HDFS-5335 for details.
 */
@Test public void testCloseTwice() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  FSDataOutputStream os = fs.create(new Path("/test"));
  DFSOutputStream dos =
      (DFSOutputStream) Whitebox.getInternalState(os, "wrappedStream");
  @SuppressWarnings("unchecked")
  AtomicReference<IOException> ex =
      (AtomicReference<IOException>) Whitebox.getInternalState(dos, "lastException");
  Assert.assertNull(ex.get());
  dos.close();
  // Inject an exception; the next close() must throw exactly this
  // object, exactly once.
  IOException dummy = new IOException("dummy");
  ex.set(dummy);
  try {
    dos.close();
    // Original silently passed if close() did not throw at all.
    Assert.fail("close() should have rethrown the injected exception");
  } catch (IOException e) {
    // "The same exception" is an identity claim, so assertSame; the
    // original also had assertEquals(actual, expected) arguments in the
    // wrong order.
    Assert.assertSame(dummy, e);
  }
  // The stored exception must be cleared after being thrown once, so a
  // further close() is a silent no-op.
  Assert.assertNull(ex.get());
  dos.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates many small files, deletes them all, and verifies that the
 * cluster's total DFS usage returns to its starting value after the
 * datanodes have had a few heartbeats to report the freed blocks.
 */
@Test public void testRemove() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdirs(dir));
    long dfsUsedStart = getTotalDfsUsed(cluster);
    final int fileCount = 100;
    // Create the files...
    for (int idx = 0; idx < fileCount; idx++) {
      createFile(fs, new Path(dir, "a" + idx));
    }
    long dfsUsedMax = getTotalDfsUsed(cluster);
    // ...then delete every one of them.
    for (int idx = 0; idx < fileCount; idx++) {
      fs.delete(new Path(dir, "a" + idx), false);
    }
    // Give the datanodes a few heartbeats to report freed space.
    Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000);
    long dfsUsedFinal = getTotalDfsUsed(cluster);
    assertEquals("All blocks should be gone. start=" + dfsUsedStart
        + " max=" + dfsUsedMax + " final=" + dfsUsedFinal,
        dfsUsedStart, dfsUsedFinal);
    fs.delete(dir, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises several rename scenarios: leases surviving an unrelated
 * rename, renaming onto a non-existent parent, renaming a directory into
 * its own subtree, prefix-named siblings, and self/trailing-slash renames.
 */
@Test public void testRename() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
FileSystem fs=cluster.getFileSystem();
assertTrue(fs.mkdirs(dir));
{
Path a=new Path(dir,"a");
Path aa=new Path(dir,"aa");
Path b=new Path(dir,"b");
createFile(fs,a);
// "a" is fully written and closed, so no lease is held on it.
assertEquals(0,countLease(cluster));
// Open "aa" for write and keep it open: one active lease.
DataOutputStream aa_out=fs.create(aa);
aa_out.writeBytes("something");
assertEquals(1,countLease(cluster));
list(fs,"rename0");
// Renaming the unrelated closed file must not disturb the open
// writer on "aa".
fs.rename(a,b);
list(fs,"rename1");
aa_out.writeBytes(" more");
aa_out.close();
list(fs,"rename2");
// Closing "aa" releases the last lease.
assertEquals(0,countLease(cluster));
}
{
// Rename into a destination whose parent does not exist must fail.
Path dstPath=new Path("/c/d");
assertFalse(fs.exists(dstPath));
assertFalse(fs.rename(dir,dstPath));
}
{
// Renaming a directory into its own subtree must fail, both for the
// directory itself and for its parent.
Path src=new Path("/a/b");
Path dst=new Path("/a/b/c");
createFile(fs,new Path(src,"foo"));
assertFalse(fs.rename(src,dst));
assertFalse(fs.rename(src.getParent(),dst.getParent()));
}
{
// A destination that merely shares a name prefix is a valid target.
Path src=new Path("/testPrefix");
Path dst=new Path("/testPrefixfile");
createFile(fs,src);
assertTrue(fs.rename(src,dst));
}
{
// Self-rename succeeds; renaming a directory onto its own
// trailing-slash form fails; renaming a file onto its own
// trailing-slash form succeeds.
Path src=new Path("/a/b/c");
createFile(fs,src);
assertTrue(fs.rename(src,src));
assertFalse(fs.rename(new Path("/a/b"),new Path("/a/b/")));
assertTrue(fs.rename(src,new Path("/a/b/c/")));
}
fs.delete(dir,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check the blocks of dst file are cleaned after rename with overwrite
 */
@Test(timeout=120000) public void testRenameWithOverwrite() throws Exception {
final short replFactor=2;
final long blockSize=512;
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(replFactor).build();
DistributedFileSystem dfs=cluster.getFileSystem();
try {
long fileLen=blockSize * 3;
String src="/foo/src";
String dst="/foo/dst";
Path srcPath=new Path(src);
Path dstPath=new Path(dst);
// Create both src and dst so the rename has something to overwrite.
DFSTestUtil.createFile(dfs,srcPath,fileLen,replFactor,1);
DFSTestUtil.createFile(dfs,dstPath,fileLen,replFactor,1);
// Record dst's first block and confirm the BlockManager knows it.
LocatedBlocks lbs=NameNodeAdapter.getBlockLocations(cluster.getNameNode(),dst,0,fileLen);
BlockManager bm=NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null);
dfs.rename(srcPath,dstPath,Rename.OVERWRITE);
// After the overwriting rename, the overwritten file's block must be
// gone from the BlockManager.
assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) == null);
}
finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * This test attempts to rollback the NameNode and DataNode under
 * a number of valid and invalid conditions: with/without a "previous"
 * storage directory, with corrupted or future/old layout versions, and
 * with missing edits or image files.
 */
@Test public void testRollback() throws Exception {
File[] baseDirs;
UpgradeUtilities.initialize();
StorageInfo storageInfo=null;
// Repeat every scenario with one and with two configured storage dirs.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
// Disable the periodic block scanner so it does not interfere.
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs=conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
// Valid: NameNode rollback with both "current" and "previous" present.
log("Normal NameNode rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
checkResult(NAME_NODE,nameNodeDirs);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Valid: DataNode started with the ROLLBACK option rolls back cleanly.
log("Normal DataNode rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
checkResult(DATA_NODE,dataNodeDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Valid: block-pool rollback where the "previous" block-pool dir carries a
// layout version one step different from the current software's.
log("Normal BlockPool rollback",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs,"current",UpgradeUtilities.getCurrentBlockPoolID(cluster));
UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs,"previous",UpgradeUtilities.getCurrentBlockPoolID(cluster));
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION - 1,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
File[] dataCurrentDirs=new File[dataNodeDirs.length];
for (int i=0; i < dataNodeDirs.length; i++) {
dataCurrentDirs[i]=new File((new Path(dataNodeDirs[i] + "/current")).toString());
}
UpgradeUtilities.createDataNodeVersionFile(dataCurrentDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
assertTrue(cluster.isDataNodeUp());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Invalid: NameNode rollback must fail when no "previous" dir exists.
log("NameNode rollback without existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
startNameNodeShouldFail("None of the storage directories contain previous fs state");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// DataNode started with ROLLBACK but no "previous" dir starts without error.
log("DataNode rollback without existing previous dir",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.UPGRADE).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Invalid: "previous" advertises a layout version newer than the software
// supports (Integer.MIN_VALUE); the block pool must refuse to roll back.
log("DataNode rollback with future stored layout version in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Invalid: "previous" records a newer fsscTime (Long.MAX_VALUE) than the
// NameNode's; the block pool must refuse to roll back.
log("DataNode rollback with newer fsscTime in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
NameNode.doRollback(conf,false);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).dnStartupOption(StartupOption.ROLLBACK).build();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),Long.MAX_VALUE,NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.ROLLBACK,cluster.getNamesystem().getBlockPoolId());
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
// Invalid: "previous" is missing its edits files.
log("NameNode rollback with no edits file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
deleteMatchingFiles(baseDirs,"edits.*");
startNameNodeShouldFail("Gap in transactions");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Invalid: "previous" is missing its fsimage files.
log("NameNode rollback with no image file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
deleteMatchingFiles(baseDirs,"fsimage_.*");
startNameNodeShouldFail("No valid image files found");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Invalid: the VERSION file in "previous" has been corrupted.
log("NameNode rollback with corrupt version file",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
for ( File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f,"VERSION"),"layoutVersion".getBytes(Charsets.UTF_8),"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail("file VERSION has layoutVersion missing");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
// Invalid: "previous" carries an unsupported, too-old layout version (1).
log("NameNode rollback with old layout version in previous",numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
storageInfo=new StorageInfo(1,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail("Cannot rollback to storage version 1 using this version");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Runs "-appendToFile" with two local sources against the same remote
 * target twice, verifying the remote length grows by the combined input
 * size on each invocation.
 */
@Test(timeout=300000) public void testAppendToFile() throws Exception {
  final int inputFileLength=1024 * 1024;
  final File testRoot=new File(TEST_ROOT_DIR,"testAppendtoFileDir");
  testRoot.mkdirs();
  final File localSrc1=new File(testRoot,"file1");
  final File localSrc2=new File(testRoot,"file2");
  createLocalFileWithRandomData(inputFileLength,localSrc1);
  createLocalFileWithRandomData(inputFileLength,localSrc2);
  final Configuration hdfsConf=new HdfsConfiguration();
  final MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).build();
  miniCluster.waitActive();
  try {
    final FileSystem hdfs=miniCluster.getFileSystem();
    assertTrue("Not a HDFS: " + hdfs.getUri(),hdfs instanceof DistributedFileSystem);
    final Path target=new Path("/remoteFile");
    final FsShell fsShell=new FsShell();
    fsShell.setConf(hdfsConf);
    final String[] cmd=new String[]{"-appendToFile",localSrc1.toString(),localSrc2.toString(),target.toString()};
    // First run creates the remote file from the two local sources.
    int exitCode=ToolRunner.run(fsShell,cmd);
    assertThat(exitCode,is(0));
    assertThat(hdfs.getFileStatus(target).getLen(),is((long)inputFileLength * 2));
    // Second run appends the same two sources again, doubling the length.
    exitCode=ToolRunner.run(fsShell,cmd);
    assertThat(exitCode,is(0));
    assertThat(hdfs.getFileStatus(target).getLen(),is((long)inputFileLength * 4));
  }
  finally {
    miniCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies "-get": a clean file is retrieved intact, a file with a corrupted
// on-disk block fails the checksum check, and "-get -ignoreCrc" still
// retrieves the corrupted bytes.
@Test(timeout=30000) public void testGet() throws IOException {
DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
final String fname="testGet.txt";
Path root=new Path("/test/get");
final Path remotef=new Path(root,fname);
final Configuration conf=new HdfsConfiguration();
// Shrink the client retry window so a failing read surfaces quickly.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
// Runner that executes "-get [options] <remote> <dst>", asserts the expected
// exit code, and returns the fetched contents (null when failure expected).
TestGetRunner runner=new TestGetRunner(){
private int count=0;
private final FsShell shell=new FsShell(conf);
public String run( int exitcode, String... options) throws IOException {
// A fresh local destination file for every invocation.
String dst=new File(TEST_ROOT_DIR,fname + ++count).getAbsolutePath();
String[] args=new String[options.length + 3];
args[0]="-get";
args[args.length - 2]=remotef.toString();
args[args.length - 1]=dst;
for (int i=0; i < options.length; i++) {
args[i + 1]=options[i];
}
show("args=" + Arrays.asList(args));
try {
assertEquals(exitcode,shell.run(args));
}
catch ( Exception e) {
assertTrue(StringUtils.stringifyException(e),false);
}
return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null;
}
}
;
File localf=createLocalFile(new File(TEST_ROOT_DIR,fname));
MiniDFSCluster cluster=null;
DistributedFileSystem dfs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
dfs=cluster.getFileSystem();
mkdir(dfs,root);
dfs.copyFromLocalFile(false,false,new Path(localf.getPath()),remotef);
String localfcontent=DFSTestUtil.readFile(localf);
// Uncorrupted file: -get succeeds both with and without -ignoreCrc.
assertEquals(localfcontent,runner.run(0));
assertEquals(localfcontent,runner.run(0,"-ignoreCrc"));
// Locate the block files, stop the cluster, and corrupt them on disk.
List files=getBlockFiles(cluster);
dfs.close();
cluster.shutdown();
show("files=" + files);
corrupt(files);
// Restart on the same (now corrupted) storage without reformatting.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false).build();
dfs=cluster.getFileSystem();
// Plain -get must now fail (exit code 1) on the checksum mismatch...
assertEquals(null,runner.run(1));
// ...but -ignoreCrc retrieves the bytes as stored.
String corruptedcontent=runner.run(0,"-ignoreCrc");
// The assertions imply corrupt() increments the first byte: the tail is
// unchanged and the first character differs by exactly one.
assertEquals(localfcontent.substring(1),corruptedcontent.substring(1));
assertEquals(localfcontent.charAt(0) + 1,corruptedcontent.charAt(0));
}
finally {
if (null != dfs) {
try {
dfs.close();
}
catch ( Exception e) {
// best-effort close during cleanup
}
}
if (null != cluster) {
cluster.shutdown();
}
localf.delete();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ConditionMatcher HybridVerifier
/**
 * Exercises "-appendToFile" argument validation: too few arguments, and
 * mixing "-" (stdin) with regular file sources, must both fail with a
 * non-zero exit code.
 */
@Test(timeout=300000) public void testAppendToFileBadArgs() throws Exception {
  final int inputFileLength=1024 * 1024;
  final File testRoot=new File(TEST_ROOT_DIR,"testAppendToFileBadArgsDir");
  testRoot.mkdirs();
  final File localSrc=new File(testRoot,"file1");
  createLocalFileWithRandomData(inputFileLength,localSrc);
  final Configuration hdfsConf=new HdfsConfiguration();
  final MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).build();
  miniCluster.waitActive();
  try {
    final FileSystem hdfs=miniCluster.getFileSystem();
    assertTrue("Not a HDFS: " + hdfs.getUri(),hdfs instanceof DistributedFileSystem);
    final FsShell fsShell=new FsShell();
    fsShell.setConf(hdfsConf);
    // Only a source, no destination: must be rejected.
    int exitCode=ToolRunner.run(fsShell,new String[]{"-appendToFile",localSrc.toString()});
    assertThat(exitCode,not(0));
    // "-" (stdin) combined with a file source: must be rejected.
    final Path target=new Path("/remoteFile");
    exitCode=ToolRunner.run(fsShell,new String[]{"-appendToFile",localSrc.toString(),"-",target.toString()});
    assertThat(exitCode,not(0));
  }
  finally {
    miniCluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies delete semantics on a non-empty directory: a non-recursive
 * delete must throw IOException, while a recursive delete must succeed.
 */
@Test(timeout=30000) public void testRecursiveRm() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs=cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
  try {
    fs.mkdirs(new Path(new Path("parent"),"child"));
    try {
      fs.delete(new Path("parent"),false);
      // A bare "assert false" is a no-op unless the JVM runs with -ea;
      // use a JUnit assertion so this failure is always reported.
      assertTrue("Non-recursive delete of a non-empty directory should have thrown IOException",false);
    }
    catch ( IOException e) {
      // expected: a non-empty directory cannot be deleted non-recursively
    }
    try {
      fs.delete(new Path("parent"),true);
    }
    catch ( IOException e) {
      assertTrue("Recursive delete of a non-empty directory should not throw: " + e,false);
    }
  }
  finally {
    try {
      fs.close();
    }
    catch ( IOException e) {
      // best-effort close during cleanup
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies -put/copyFromLocalFile behaviour: a concurrent second copy to the
// same destination must fail, multi-source copies place all files under the
// destination dir, and moveFromLocalFile removes the local sources.
@Test(timeout=30000) public void testPut() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
try {
// Remove stale checksum files left over from earlier runs.
new File(TEST_ROOT_DIR,".f1.crc").delete();
new File(TEST_ROOT_DIR,".f2.crc").delete();
final File f1=createLocalFile(new File(TEST_ROOT_DIR,"f1"));
final File f2=createLocalFile(new File(TEST_ROOT_DIR,"f2"));
final Path root=mkdir(dfs,new Path("/test/put"));
final Path dst=new Path(root,"dst");
show("begin");
// Second copy to the same destination; it is expected to fail with an
// IOException (the thread asserts if the copy unexpectedly succeeds).
final Thread copy2ndFileThread=new Thread(){
@Override public void run(){
try {
show("copy local " + f2 + " to remote "+ dst);
dfs.copyFromLocalFile(false,false,new Path(f2.getPath()),dst);
}
catch ( IOException ioe) {
show("good " + StringUtils.stringifyException(ioe));
return;
}
// Reaching here means the concurrent copy succeeded — a failure.
assertTrue(false);
}
}
;
SecurityManager sm=System.getSecurityManager();
System.out.println("SecurityManager = " + sm);
// Install a SecurityManager that, on the first permission check issued
// from within FileUtil.copyContent (i.e. while the first copy is in
// flight), starts the second copy and sleeps so the two copies overlap.
System.setSecurityManager(new SecurityManager(){
private boolean firstTime=true;
@Override public void checkPermission( Permission perm){
if (firstTime) {
Thread t=Thread.currentThread();
if (!t.toString().contains("DataNode")) {
String s="" + Arrays.asList(t.getStackTrace());
if (s.contains("FileUtil.copyContent")) {
firstTime=false;
copy2ndFileThread.start();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
}
}
}
}
}
);
show("copy local " + f1 + " to remote "+ dst);
dfs.copyFromLocalFile(false,false,new Path(f1.getPath()),dst);
show("done");
try {
copy2ndFileThread.join();
}
catch ( InterruptedException e) {
}
// Restore the original SecurityManager.
System.setSecurityManager(sm);
// Multi-source put: both files must appear under the destination dir.
final Path destmultiple=mkdir(dfs,new Path("/test/putmultiple"));
Path[] srcs=new Path[2];
srcs[0]=new Path(f1.getPath());
srcs[1]=new Path(f2.getPath());
dfs.copyFromLocalFile(false,false,srcs,destmultiple);
srcs[0]=new Path(destmultiple,"f1");
srcs[1]=new Path(destmultiple,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
// Multi-source move: files land remotely and the local originals vanish.
final Path destmultiple2=mkdir(dfs,new Path("/test/movemultiple"));
srcs[0]=new Path(f1.getPath());
srcs[1]=new Path(f2.getPath());
dfs.moveFromLocalFile(srcs,destmultiple2);
assertFalse(f1.exists());
assertFalse(f2.exists());
srcs[0]=new Path(destmultiple2,"f1");
srcs[1]=new Path(destmultiple2,"f2");
assertTrue(dfs.exists(srcs[0]));
assertTrue(dfs.exists(srcs[1]));
f1.delete();
f2.delete();
}
finally {
try {
dfs.close();
}
catch ( Exception e) {
// best-effort close during cleanup
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs "-du" on a directory containing two files and checks that the
 * expected byte counts appear in the captured stdout.
 */
@Test(timeout=30000) public void testDu() throws IOException {
  Configuration duConf=new HdfsConfiguration();
  MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(duConf).numDataNodes(2).build();
  DistributedFileSystem hdfs=miniCluster.getFileSystem();
  // Capture stdout so the command's report can be inspected.
  PrintStream savedOut=System.out;
  ByteArrayOutputStream captured=new ByteArrayOutputStream();
  PrintStream captureStream=new PrintStream(captured);
  System.setOut(captureStream);
  FsShell fsShell=new FsShell();
  fsShell.setConf(duConf);
  try {
    Path dirPath=new Path("/test/dir");
    assertTrue(hdfs.mkdirs(dirPath));
    assertTrue(hdfs.exists(dirPath));
    Path filePath1=new Path("/test/dir/file");
    writeFile(hdfs,filePath1);
    assertTrue(hdfs.exists(filePath1));
    Path filePath2=new Path("/test/dir/file2");
    writeFile(hdfs,filePath2);
    assertTrue(hdfs.exists(filePath2));
    int exitCode=-1;
    try {
      exitCode=fsShell.run(new String[]{"-du","/test/dir"});
    }
    catch ( Exception e) {
      System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
    }
    assertTrue(exitCode == 0);
    String report=captured.toString();
    captured.reset();
    // The two files are expected to be reported as 22 and 23 bytes
    // (sizes produced by writeFile for these paths).
    assertTrue(report.contains("22"));
    assertTrue(report.contains("23"));
  }
  finally {
    System.setOut(savedOut);
    miniCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies FsShell commands accept fully-qualified URIs pointing at a remote
// (non-default) HDFS cluster, plus scheme-only "hdfs:///" paths that resolve
// against the shell's configured default filesystem.
@Test(timeout=30000) public void testURIPaths() throws Exception {
Configuration srcConf=new HdfsConfiguration();
Configuration dstConf=new HdfsConfiguration();
MiniDFSCluster srcCluster=null;
MiniDFSCluster dstCluster=null;
File bak=new File(PathUtils.getTestDir(getClass()),"dfs_tmp_uri");
bak.mkdirs();
try {
// Two independent clusters; the shell is configured with srcConf, so every
// dstFs access below must go through an explicit URI.
srcCluster=new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,bak.getAbsolutePath());
dstCluster=new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
FileSystem srcFs=srcCluster.getFileSystem();
FileSystem dstFs=dstCluster.getFileSystem();
FsShell shell=new FsShell();
shell.setConf(srcConf);
// -ls against the remote cluster's root URI.
String[] argv=new String[2];
argv[0]="-ls";
argv[1]=dstFs.getUri().toString() + "/";
int ret=ToolRunner.run(shell,argv);
assertEquals("ls works on remote uri ",0,ret);
// -rmr of a remote directory via URI.
dstFs.mkdirs(new Path("/hadoopdir"));
argv=new String[2];
argv[0]="-rmr";
argv[1]=dstFs.getUri().toString() + "/hadoopdir";
ret=ToolRunner.run(shell,argv);
assertEquals("-rmr works on remote uri " + argv[1],0,ret);
// -du against the remote root.
argv[0]="-du";
argv[1]=dstFs.getUri().toString() + "/";
ret=ToolRunner.run(shell,argv);
assertEquals("du works on remote uri ",0,ret);
// -put a local file (file: URI) onto the remote cluster.
File furi=new File(TEST_ROOT_DIR,"furi");
createLocalFile(furi);
argv=new String[3];
argv[0]="-put";
argv[1]=furi.toURI().toString();
argv[2]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" put is working ",0,ret);
// -cp between the two clusters.
argv[0]="-cp";
argv[1]=dstFs.getUri().toString() + "/furi";
argv[2]=srcFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cp is working ",0,ret);
assertTrue(srcFs.exists(new Path("/furi")));
// -cat from the remote cluster.
argv=new String[2];
argv[0]="-cat";
argv[1]=dstFs.getUri().toString() + "/furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" cat is working ",0,ret);
dstFs.delete(new Path("/furi"),true);
dstFs.delete(new Path("/hadoopdir"),true);
// Recursive chgrp/chown on the remote cluster via URI.
String file="/tmp/chownTest";
Path path=new Path(file);
Path parent=new Path("/tmp");
Path root=new Path("/");
TestDFSShell.writeFile(dstFs,path);
runCmd(shell,"-chgrp","-R","herbivores",dstFs.getUri().toString() + "/*");
confirmOwner(null,"herbivores",dstFs,parent,path);
runCmd(shell,"-chown","-R",":reptiles",dstFs.getUri().toString() + "/");
confirmOwner(null,"reptiles",dstFs,root,parent,path);
// "hdfs:///" paths (no authority) resolve via the shell's default FS,
// which is srcCluster, where "/furi" was copied above.
argv[0]="-cat";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals(" default works for cat",0,ret);
argv[0]="-ls";
argv[1]="hdfs:///";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for ls ",0,ret);
argv[0]="-rmr";
argv[1]="hdfs:///furi";
ret=ToolRunner.run(shell,argv);
assertEquals("default works for rm/rmr",0,ret);
}
finally {
if (null != srcCluster) {
srcCluster.shutdown();
}
if (null != dstCluster) {
dstCluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests various options of DFSShell (-rm with globs, -cat, -test flags,
 * -touchz, -mkdir, -cp). The repeated "run the shell, swallow exceptions,
 * assert on the exit code" pattern is factored into
 * {@link #runShellSafely(FsShell, String...)}.
 */
@Test(timeout=120000) public void testDFSShell() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs=cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
  DistributedFileSystem fileSys=(DistributedFileSystem)fs;
  FsShell shell=new FsShell();
  shell.setConf(conf);
  try {
    // Create the test directory (mkdirs on an existing dir also succeeds)
    // and two files inside it.
    Path myPath=new Path("/test/mkdirs");
    assertTrue(fileSys.mkdirs(myPath));
    assertTrue(fileSys.exists(myPath));
    assertTrue(fileSys.mkdirs(myPath));
    Path myFile=new Path("/test/mkdirs/myFile");
    writeFile(fileSys,myFile);
    assertTrue(fileSys.exists(myFile));
    Path myFile2=new Path("/test/mkdirs/myFile2");
    writeFile(fileSys,myFile2);
    assertTrue(fileSys.exists(myFile2));
    // -rm with a glob removes both files; recreate them afterwards.
    assertEquals(0,runShellSafely(shell,"-rm","/test/mkdirs/myFile*"));
    assertFalse(fileSys.exists(myFile));
    assertFalse(fileSys.exists(myFile2));
    writeFile(fileSys,myFile);
    assertTrue(fileSys.exists(myFile));
    writeFile(fileSys,myFile2);
    assertTrue(fileSys.exists(myFile2));
    // -cat of two existing files succeeds.
    assertEquals(0,runShellSafely(shell,"-cat","/test/mkdirs/myFile","/test/mkdirs/myFile2"));
    fileSys.delete(myFile2,true);
    // -cat and -rm of a nonexistent file fail.
    assertTrue(runShellSafely(shell,"-cat","/test/mkdirs/myFile1") != 0);
    assertTrue(runShellSafely(shell,"-rm","/test/mkdirs/myFile1") != 0);
    // -rm of an existing file succeeds.
    assertEquals(0,runShellSafely(shell,"-rm","/test/mkdirs/myFile"));
    // -test flags against a missing path, then a zero-length file created
    // with -touchz (which fails if the parent directory does not exist).
    assertEquals(1,runShellSafely(shell,"-test","-e","/test/mkdirs/noFileHere"));
    assertEquals(1,runShellSafely(shell,"-test","-z","/test/mkdirs/noFileHere"));
    assertEquals(0,runShellSafely(shell,"-touchz","/test/mkdirs/isFileHere"));
    assertEquals(1,runShellSafely(shell,"-touchz","/test/mkdirs/thisDirNotExists/isFileHere"));
    assertEquals(0,runShellSafely(shell,"-test","-e","/test/mkdirs/isFileHere"));
    assertEquals(1,runShellSafely(shell,"-test","-d","/test/mkdirs/isFileHere"));
    assertEquals(0,runShellSafely(shell,"-test","-z","/test/mkdirs/isFileHere"));
    // -mkdir; then -cp of a directory into itself fails while -cp to a
    // sibling path succeeds.
    assertEquals(0,runShellSafely(shell,"-mkdir","/test/dir1"));
    assertEquals(1,runShellSafely(shell,"-cp","/test/dir1","/test/dir1/dir2"));
    assertEquals(0,runShellSafely(shell,"-cp","/test/dir1","/test/dir1foo"));
    // -test -f: missing path and directory both fail; a regular file passes.
    assertEquals(1,runShellSafely(shell,"-test","-f","/test/mkdirs/noFileHere"));
    assertEquals(1,runShellSafely(shell,"-test","-f","/test/mkdirs"));
    writeFile(fileSys,myFile);
    assertTrue(fileSys.exists(myFile));
    assertEquals(0,runShellSafely(shell,"-test","-f",myFile.toString()));
    // -test -s: missing path and zero-length file fail; non-empty passes.
    assertEquals(1,runShellSafely(shell,"-test","-s","/test/mkdirs/noFileHere"));
    assertEquals(1,runShellSafely(shell,"-test","-s","/test/mkdirs/isFileHere"));
    assertEquals(0,runShellSafely(shell,"-test","-s",myFile.toString()));
  }
  finally {
    try {
      fileSys.close();
    }
    catch ( Exception e) {
      // best-effort close during cleanup
    }
    cluster.shutdown();
  }
}
/**
 * Runs an FsShell command, translating any exception into a -1 exit code
 * (logged to stderr), so callers can assert directly on the return value.
 */
private static int runShellSafely(FsShell shell, String... args) {
  try {
    return shell.run(args);
  }
  catch ( Exception e) {
    System.err.println("Exception raised from DFSShell.run " + StringUtils.stringifyException(e));
    return -1;
  }
}
InternalCallVerifier EqualityVerifier
/**
 * The default filesystem is file:// which is not a DFS, so DFSAdmin
 * should catch the resulting InvalidArgumentException internally and
 * return a -1 exit code.
 * @throws Exception
 */
@Test(timeout=30000) public void testInvalidShell() throws Exception {
  Configuration conf=new Configuration();
  DFSAdmin admin=new DFSAdmin();
  admin.setConf(conf);
  int res=admin.run(new String[]{"-refreshNodes"});
  // assertEquals is (message, expected, actual): -1 is the expected value.
  // The original call had expected and actual swapped.
  assertEquals("expected to fail -1",-1,res);
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Verifies "-setrep": applied to a single file it changes only that file's
 * replication; applied to a directory it also changes files nested below it.
 */
@Test(timeout=30000) public void testSetrep() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell=null;
  FileSystem fs=null;
  final String testdir1="/tmp/TestDFSShell-testSetrep-" + counter.getAndIncrement();
  final String testdir2=testdir1 + "/nestedDir";
  final Path hdfsFile1=new Path(testdir1,"testFileForSetrep");
  final Path hdfsFile2=new Path(testdir2,"testFileForSetrep");
  // Short.valueOf replaces the deprecated Short(short) boxing constructor.
  final Short oldRepFactor=Short.valueOf((short)1);
  final Short newRepFactor=Short.valueOf((short)3);
  try {
    String[] argv;
    cluster.waitActive();
    fs=cluster.getFileSystem();
    assertThat(fs.mkdirs(new Path(testdir2)),is(true));
    shell=new FsShell(conf);
    // One file at the top level, one nested one level deeper.
    fs.create(hdfsFile1,true).close();
    fs.create(hdfsFile2,true).close();
    // -setrep on a single file must leave the other file untouched.
    argv=new String[]{"-setrep",newRepFactor.toString(),hdfsFile1.toString()};
    assertThat(shell.run(argv),is(SUCCESS));
    assertThat(fs.getFileStatus(hdfsFile1).getReplication(),is(newRepFactor));
    assertThat(fs.getFileStatus(hdfsFile2).getReplication(),is(oldRepFactor));
    // -setrep on the directory must also update the nested file.
    argv=new String[]{"-setrep",newRepFactor.toString(),testdir1};
    assertThat(shell.run(argv),is(SUCCESS));
    assertThat(fs.getFileStatus(hdfsFile1).getReplication(),is(newRepFactor));
    assertThat(fs.getFileStatus(hdfsFile2).getReplication(),is(newRepFactor));
  }
  finally {
    if (shell != null) {
      shell.close();
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises "-count" on an HDFS tree built by createTree, on a local
 * directory, and on both in a single invocation.
 */
@Test(timeout=30000) public void testCount() throws Exception {
  Configuration countConf=new HdfsConfiguration();
  MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(countConf).numDataNodes(2).build();
  DistributedFileSystem hdfs=miniCluster.getFileSystem();
  FsShell fsShell=new FsShell();
  fsShell.setConf(countConf);
  try {
    String root=createTree(hdfs,"count");
    // Expected (directory, file) counts for the tree and its sub-paths.
    runCount(root,2,4,fsShell);
    runCount(root + "2",2,1,fsShell);
    runCount(root + "2/f1",0,1,fsShell);
    runCount(root + "2/sub",1,0,fsShell);
    // -count also works against the local filesystem.
    final FileSystem localFs=FileSystem.getLocal(countConf);
    Path localPath=new Path(TEST_ROOT_DIR,"testcount");
    localPath=localPath.makeQualified(localFs.getUri(),localFs.getWorkingDirectory());
    localFs.mkdirs(localPath);
    final String localPathStr=localPath.toString();
    System.out.println("localstr=" + localPathStr);
    runCount(localPathStr,1,0,fsShell);
    // A single invocation may mix HDFS and local paths.
    assertEquals(0,runCmd(fsShell,"-count",root,localPathStr));
  }
  finally {
    try {
      hdfs.close();
    }
    catch ( Exception e) {
      // best-effort close during cleanup
    }
    miniCluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies a zero-length local file can be copied into HDFS and back, and
 * that the round-tripped copy is still a zero-length regular file.
 */
@Test(timeout=30000) public void testZeroSizeFile() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs=cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
  final DistributedFileSystem dfs=(DistributedFileSystem)fs;
  try {
    // Create an empty local source file. assertFalse replaces the less
    // readable assertTrue(!...) idiom and yields clearer failure output.
    final File f1=new File(TEST_ROOT_DIR,"f1");
    assertFalse(f1.exists());
    assertTrue(f1.createNewFile());
    assertTrue(f1.exists());
    assertTrue(f1.isFile());
    assertEquals(0L,f1.length());
    // Copy the empty file into HDFS.
    final Path root=mkdir(dfs,new Path("/test/zeroSizeFile"));
    final Path remotef=new Path(root,"dst");
    show("copy local " + f1 + " to remote "+ remotef);
    dfs.copyFromLocalFile(false,false,new Path(f1.getPath()),remotef);
    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
    // Copy it back out and confirm it is still an empty regular file.
    final File f2=new File(TEST_ROOT_DIR,"f2");
    assertFalse(f2.exists());
    dfs.copyToLocalFile(remotef,new Path(f2.getPath()));
    assertTrue(f2.exists());
    assertTrue(f2.isFile());
    assertEquals(0L,f2.length());
    f1.delete();
    f2.delete();
  }
  finally {
    try {
      dfs.close();
    }
    catch ( Exception e) {
      // best-effort close during cleanup
    }
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Check command error outputs and exit statuses: shell commands pointed at
 * missing or conflicting paths must print a human-readable message (never a
 * stack trace) on stderr and return the documented exit code.
 */
@Test(timeout=30000) public void testErrOutPut() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  PrintStream bak = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem srcFs = cluster.getFileSystem();
    Path root = new Path("/nonexistentfile");
    // Redirect stderr into a buffer so each command's output can be checked.
    bak = System.err;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream tmp = new PrintStream(out);
    System.setErr(tmp);
    String[] argv = new String[2];
    // -cat on a missing file: exit 1, and no exception text in the output.
    argv[0] = "-cat";
    argv[1] = root.toUri().getPath();
    int ret = ToolRunner.run(new FsShell(), argv);
    assertEquals(" -cat returned 1 ", 1, ret);
    String returned = out.toString();
    assertTrue("cat does not print exceptions ", (returned.lastIndexOf("Exception") == -1));
    out.reset();
    // -rm on a missing file: exit 1 with a unix-like message.
    argv[0] = "-rm";
    argv[1] = root.toString();
    FsShell shell = new FsShell();
    shell.setConf(conf);
    ret = ToolRunner.run(shell, argv);
    assertEquals(" -rm returned 1 ", 1, ret);
    returned = out.toString();
    out.reset();
    assertTrue("rm prints reasonable error ", (returned.lastIndexOf("No such file or directory") != -1));
    // -rmr behaves the same way.
    argv[0] = "-rmr";
    argv[1] = root.toString();
    ret = ToolRunner.run(shell, argv);
    assertEquals(" -rmr returned 1", 1, ret);
    returned = out.toString();
    assertTrue("rmr prints reasonable error ", (returned.lastIndexOf("No such file or directory") != -1));
    out.reset();
    // -du / -dus on a missing file: only the message is asserted.
    // NOTE(review): the exit code is intentionally left unchecked for these two.
    argv[0] = "-du";
    argv[1] = "/nonexistentfile";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertTrue(" -du prints reasonable error ", (returned.lastIndexOf("No such file or directory") != -1));
    out.reset();
    argv[0] = "-dus";
    argv[1] = "/nonexistentfile";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertTrue(" -dus prints reasonable error", (returned.lastIndexOf("No such file or directory") != -1));
    out.reset();
    // -ls on a missing path must not pretend success with "Found 0 items".
    argv[0] = "-ls";
    argv[1] = "/nonexistenfile";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertTrue(" -ls does not return Found 0 items", (returned.lastIndexOf("Found 0") == -1));
    out.reset();
    argv[0] = "-ls";
    argv[1] = "/nonexistentfile";
    ret = ToolRunner.run(shell, argv);
    // Fixed message: the command under test here is -ls, not -lsr.
    assertEquals(" -ls on nonexistent file should fail ", 1, ret);
    out.reset();
    // -ls on an existing empty directory prints nothing (no "Found 0").
    srcFs.mkdirs(new Path("/testdir"));
    argv[0] = "-ls";
    argv[1] = "/testdir";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertTrue(" -ls does not print out anything ", (returned.lastIndexOf("Found 0") == -1));
    out.reset();
    // A glob that matches nothing is an error.
    argv[0] = "-ls";
    argv[1] = "/user/nonxistant/*";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" -ls on nonexistent glob returns 1", 1, ret);
    out.reset();
    // -mkdir over an existing directory: "File exists".
    argv[0] = "-mkdir";
    argv[1] = "/testdir";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertEquals(" -mkdir returned 1 ", 1, ret);
    assertTrue(" -mkdir returned File exists", (returned.lastIndexOf("File exists") != -1));
    // -mkdir over an existing file: "not a directory".
    Path testFile = new Path("/testfile");
    OutputStream outtmp = srcFs.create(testFile);
    outtmp.write(testFile.toString().getBytes());
    outtmp.close();
    out.reset();
    argv[0] = "-mkdir";
    argv[1] = "/testfile";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertEquals(" -mkdir returned 1", 1, ret);
    assertTrue(" -mkdir returned this is a file ", (returned.lastIndexOf("not a directory") != -1));
    out.reset();
    // -mv to a relative (invalid) target fails.
    argv = new String[3];
    argv[0] = "-mv";
    argv[1] = "/testfile";
    argv[2] = "file";
    ret = ToolRunner.run(shell, argv);
    assertEquals("mv failed to rename", 1, ret);
    out.reset();
    // A successful rename is silent (no "Renamed" chatter).
    argv = new String[3];
    argv[0] = "-mv";
    argv[1] = "/testfile";
    argv[2] = "/testfiletest";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertTrue("no output from rename", (returned.lastIndexOf("Renamed") == -1));
    out.reset();
    // Moving the already-moved source again reports "No such file or ...".
    argv[0] = "-mv";
    argv[1] = "/testfile";
    argv[2] = "/testfiletmp";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertTrue(" unix like output", (returned.lastIndexOf("No such file or") != -1));
    out.reset();
    // Bare -du defaults to the home directory and succeeds quietly.
    argv = new String[1];
    argv[0] = "-du";
    srcFs.mkdirs(srcFs.getHomeDirectory());
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertEquals(" no error ", 0, ret);
    assertTrue("empty path specified", (returned.lastIndexOf("empty string") == -1));
    out.reset();
    // -test -d on a missing directory: exit 1 and completely silent.
    argv = new String[3];
    argv[0] = "-test";
    argv[1] = "-d";
    argv[2] = "/no/such/dir";
    ret = ToolRunner.run(shell, argv);
    returned = out.toString();
    assertEquals(" -test -d wrong result ", 1, ret);
    assertTrue(returned.isEmpty());
  } finally {
    if (bak != null) {
      System.setErr(bak);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Copies a small HDFS tree to the local file system with -copyToLocal (via a
 * trailing glob) and verifies every expected local file/directory appears;
 * then checks that copying a nonexistent source exits 1 and creates nothing.
 */
@Test(timeout=30000) public void testCopyToLocal() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
DistributedFileSystem dfs=(DistributedFileSystem)fs;
FsShell shell=new FsShell();
shell.setConf(conf);
try {
String root=createTree(dfs,"copyToLocal");
{
// Copy "<root>*" so the glob matches the tree and its sibling(s).
try {
assertEquals(0,runCmd(shell,"-copyToLocal",root + "*",TEST_ROOT_DIR));
}
 catch ( Exception e) {
// NOTE(review): a failure here is only logged; the existence asserts
// below are what actually fail the test if the copy did not happen.
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
// Every file/dir created by createTree must now exist locally.
File localroot=new File(TEST_ROOT_DIR,"copyToLocal");
File localroot2=new File(TEST_ROOT_DIR,"copyToLocal2");
File f1=new File(localroot,"f1");
assertTrue("Copying failed.",f1.isFile());
File f2=new File(localroot,"f2");
assertTrue("Copying failed.",f2.isFile());
File sub=new File(localroot,"sub");
assertTrue("Copying failed.",sub.isDirectory());
File f3=new File(sub,"f3");
assertTrue("Copying failed.",f3.isFile());
File f4=new File(sub,"f4");
assertTrue("Copying failed.",f4.isFile());
File f5=new File(localroot2,"f1");
assertTrue("Copying failed.",f5.isFile());
// Clean up the local copies so reruns start from a clean slate.
f1.delete();
f2.delete();
f3.delete();
f4.delete();
f5.delete();
sub.delete();
}
{
// Copying a nonexistent source must fail (exit 1) and create no file.
String[] args={"-copyToLocal","nosuchfile",TEST_ROOT_DIR};
try {
assertEquals(1,shell.run(args));
}
 catch ( Exception e) {
System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
}
File f6=new File(TEST_ROOT_DIR,"nosuchfile");
assertTrue(!f6.exists());
}
}
 finally {
// Best-effort close of the file system; shutdown is the real cleanup.
try {
dfs.close();
}
 catch ( Exception e) {
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the "-cp" preserve-flag semantics when the source is a DIRECTORY
 * (with an ACL, custom permission, and two xattrs set on it):
 *   -p      -> times/owner/permission preserved; xattrs and ACLs dropped
 *   -ptop   -> same as -p (explicit t/o/p)
 *   -ptopx  -> additionally preserves xattrs
 *   -ptopa  -> additionally preserves ACLs (and the ACL bit)
 *   -ptoa   -> ACLs preserved without the explicit 'p' flag
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
// Source directory with an ACL, a custom (sticky-bit) permission, a file
// inside it, and two xattrs; snapshot its attributes as the reference.
Path srcDir=new Path(hdfsTestDir,"srcDir");
fs.mkdirs(srcDir);
fs.setAcl(srcDir,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
fs.setPermission(srcDir,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
Path srcFile=new Path(srcDir,"srcFile");
fs.create(srcFile).close();
FileStatus status=fs.getFileStatus(srcDir);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
fs.setXAttr(srcDir,USER_A1,USER_A1_VALUE);
fs.setXAttr(srcDir,TRUSTED_A1,TRUSTED_A1_VALUE);
shell=new FsShell(conf);
// -cp -p: times/owner/perm carried over; xattrs and ACLs are not.
Path targetDir1=new Path(hdfsTestDir,"targetDir1");
String[] argv=new String[]{"-cp","-p",srcDir.toUri().toString(),targetDir1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp -p is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(targetDir1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
Map xattrs=fs.getXAttrs(targetDir1);
assertTrue(xattrs.isEmpty());
List acls=fs.getAclStatus(targetDir1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptop: equivalent to -p spelled out; still no xattrs/ACLs.
Path targetDir2=new Path(hdfsTestDir,"targetDir2");
argv=new String[]{"-cp","-ptop",srcDir.toUri().toString(),targetDir2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptop is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir2);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptopx: both xattrs come across; ACLs still dropped.
Path targetDir3=new Path(hdfsTestDir,"targetDir3");
argv=new String[]{"-cp","-ptopx",srcDir.toUri().toString(),targetDir3.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopx is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir3);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir3);
assertEquals(xattrs.size(),2);
assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
acls=fs.getAclStatus(targetDir3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptopa: full ACL (and ACL bit) preserved; xattrs dropped.
Path targetDir4=new Path(hdfsTestDir,"targetDir4");
argv=new String[]{"-cp","-ptopa",srcDir.toUri().toString(),targetDir4.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir4);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir4);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir4));
// -cp -ptoa: ACLs preserved even without the explicit 'p' flag.
Path targetDir5=new Path(hdfsTestDir,"targetDir5");
argv=new String[]{"-cp","-ptoa",srcDir.toUri().toString(),targetDir5.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptoa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(targetDir5);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(targetDir5);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(targetDir5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir5));
}
 finally {
if (shell != null) {
shell.close();
}
if (fs != null) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that "-cp -p" preserves times/ownership/permission (including the
 * sticky bit set on the source) while DROPPING the ACL, and that "-cp -ptopa"
 * additionally carries the full ACL and ACL bit across to the target file.
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
// Source file with an ACL and a sticky-bit permission; snapshot its
// attributes as the reference for the assertions below.
Path src=new Path(hdfsTestDir,"srcfile");
fs.create(src).close();
fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
fs.setPermission(src,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
FileStatus status=fs.getFileStatus(src);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
shell=new FsShell(conf);
// -cp -p: times/owner/perm preserved; the ACL is not copied.
Path target1=new Path(hdfsTestDir,"targetfile1");
String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(target1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
List acls=fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptopa: the ACL (and ACL bit) comes across as well.
Path target2=new Path(hdfsTestDir,"targetfile2");
argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
acls=fs.getAclStatus(target2).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target2));
}
 finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the "-cp" preserve-flag semantics when the source is a FILE with
 * an ACL and two xattrs:
 *   -p      -> times/owner/permission preserved; xattrs and ACLs dropped
 *   -ptop   -> same as -p (explicit t/o/p)
 *   -ptopx  -> additionally preserves xattrs
 *   -ptopa  -> additionally preserves ACLs (and the ACL bit)
 *   -ptoa   -> ACLs preserved without the explicit 'p' flag
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FsShell shell=null;
FileSystem fs=null;
final String testdir="/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement();
final Path hdfsTestDir=new Path(testdir);
try {
fs=cluster.getFileSystem();
fs.mkdirs(hdfsTestDir);
// Source file with an ACL; snapshot its attributes, then add two xattrs.
Path src=new Path(hdfsTestDir,"srcfile");
fs.create(src).close();
fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
FileStatus status=fs.getFileStatus(src);
final long mtime=status.getModificationTime();
final long atime=status.getAccessTime();
final String owner=status.getOwner();
final String group=status.getGroup();
final FsPermission perm=status.getPermission();
fs.setXAttr(src,USER_A1,USER_A1_VALUE);
fs.setXAttr(src,TRUSTED_A1,TRUSTED_A1_VALUE);
shell=new FsShell(conf);
// -cp -p: times/owner/perm carried over; xattrs and ACLs are not.
Path target1=new Path(hdfsTestDir,"targetfile1");
String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
int ret=ToolRunner.run(shell,argv);
assertEquals("cp -p is not working",SUCCESS,ret);
FileStatus targetStatus=fs.getFileStatus(target1);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
FsPermission targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
Map xattrs=fs.getXAttrs(target1);
assertTrue(xattrs.isEmpty());
List acls=fs.getAclStatus(target1).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptop: equivalent to -p spelled out; still no xattrs/ACLs.
Path target2=new Path(hdfsTestDir,"targetfile2");
argv=new String[]{"-cp","-ptop",src.toUri().toString(),target2.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptop is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target2);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target2);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target2).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptopx: both xattrs come across; ACLs still dropped.
Path target3=new Path(hdfsTestDir,"targetfile3");
argv=new String[]{"-cp","-ptopx",src.toUri().toString(),target3.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopx is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target3);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target3);
assertEquals(xattrs.size(),2);
assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
acls=fs.getAclStatus(target3).getEntries();
assertTrue(acls.isEmpty());
assertFalse(targetPerm.getAclBit());
// -cp -ptopa: full ACL (and ACL bit) preserved; xattrs dropped.
Path target4=new Path(hdfsTestDir,"targetfile4");
argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target4.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptopa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target4);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target4);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target4).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target4));
// -cp -ptoa: ACLs preserved even without the explicit 'p' flag.
Path target5=new Path(hdfsTestDir,"targetfile5");
argv=new String[]{"-cp","-ptoa",src.toUri().toString(),target5.toUri().toString()};
ret=ToolRunner.run(shell,argv);
assertEquals("cp -ptoa is not working",SUCCESS,ret);
targetStatus=fs.getFileStatus(target5);
assertEquals(mtime,targetStatus.getModificationTime());
assertEquals(atime,targetStatus.getAccessTime());
assertEquals(owner,targetStatus.getOwner());
assertEquals(group,targetStatus.getGroup());
targetPerm=targetStatus.getPermission();
assertTrue(perm.equals(targetPerm));
xattrs=fs.getXAttrs(target5);
assertTrue(xattrs.isEmpty());
acls=fs.getAclStatus(target5).getEntries();
assertFalse(acls.isEmpty());
assertTrue(targetPerm.getAclBit());
assertEquals(fs.getAclStatus(src),fs.getAclStatus(target5));
}
 finally {
if (null != shell) {
shell.close();
}
if (null != fs) {
fs.delete(hdfsTestDir,true);
fs.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests recursive listing: as the owner the listing succeeds (exit 0); as a
 * different user, after one subdirectory ("sub") is made unreadable, the
 * listing exits 1 but must still include the readable "zzz" entry.
 */
@Test(timeout=30000) public void testLsr() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final String root = createTree(dfs, "lsr");
    dfs.mkdirs(new Path(root, "zzz"));
    // As the owner, a full recursive listing succeeds.
    runLsr(new FsShell(conf), root, 0);
    // Revoke all permissions on one subdirectory.
    final Path sub = new Path(root, "sub");
    dfs.setPermission(sub, new FsPermission((short) 0));
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    final String tmpusername = ugi.getShortUserName() + "1";
    UserGroupInformation tmpUGI = UserGroupInformation.createUserForTesting(
        tmpusername, new String[] {tmpusername});
    // Use the generic type parameter (not the raw PrivilegedExceptionAction)
    // so doAs returns String without an unchecked conversion.
    String results = tmpUGI.doAs(new PrivilegedExceptionAction<String>() {
      @Override public String run() throws Exception {
        return runLsr(new FsShell(conf), root, 1);
      }
    });
    assertTrue(results.contains("zzz"));
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* This test ensures the appropriate response (successful or failure) from
* a Datanode when the system is started with differing version combinations.
*
* For each 3-tuple in the cross product
* ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
* {currentNamespaceId,incorrectNamespaceId},
* {pastFsscTime,currentFsscTime,futureFsscTime})
* 1. Startup Namenode with version file containing
* (currentLayoutVersion,currentNamespaceId,currentFsscTime)
* 2. Attempt to startup Datanode with version file containing
* this iterations version 3-tuple
*
*/
@Test(timeout=300000) public void testVersions() throws Exception {
UpgradeUtilities.initialize();
Configuration conf=UpgradeUtilities.initializeStorageStateConf(1,new HdfsConfiguration());
StorageData[] versions=initializeVersions();
UpgradeUtilities.createNameNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),"current");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build();
// Snapshot the running NameNode's storage info; this is the reference side
// of the compatibility check performed for every DataNode version below.
StorageData nameNodeVersion=new StorageData(HdfsConstants.NAMENODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),UpgradeUtilities.getCurrentBlockPoolID(cluster));
log("NameNode version info",NAME_NODE,null,nameNodeVersion);
String bpid=UpgradeUtilities.getCurrentBlockPoolID(cluster);
for (int i=0; i < versions.length; i++) {
// Write a fresh DataNode storage dir carrying this iteration's version
// tuple, then try to start a DataNode against the running NameNode.
File[] storage=UpgradeUtilities.createDataNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),"current");
log("DataNode version info",DATA_NODE,i,versions[i]);
UpgradeUtilities.createDataNodeVersionFile(storage,versions[i].storageInfo,bpid,versions[i].blockPoolId);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
 catch ( Exception ignore) {
// Startup is expected to fail for incompatible version tuples; the
// outcome is checked via cluster.isDataNodeUp() below, not here.
}
assertTrue(cluster.getNameNode() != null);
// The DataNode must be up if and only if its version tuple is compatible
// with the NameNode's.
assertEquals(isVersionCompatible(nameNodeVersion,versions[i]),cluster.isDataNodeUp());
cluster.shutdownDataNodes();
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
* This test iterates over the testCases table for Datanode storage and
* attempts to startup the DataNode normally.
*/
@Test public void testDNStorageStates() throws Exception {
String[] baseDirs;
// Exercise every test case with one and then two configured storage dirs.
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
for (int i=0; i < NUM_DN_TEST_CASES; i++) {
// Each testCase row encodes which storage dirs exist and the expected
// recovery outcome (see the SHOULD_RECOVER / *_AFTER_RECOVER indices).
boolean[] testCase=testCases[i];
boolean shouldRecover=testCase[SHOULD_RECOVER];
boolean curAfterRecover=testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover=testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("DATA_NODE recovery",numDirs,i,testCase);
// NameNode storage is always healthy; only the DataNode dirs vary.
createNameNodeStorageState(new boolean[]{true,true,false,false,false});
cluster=createCluster(conf);
baseDirs=createDataNodeStorageState(testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS]&& !testCase[REMOVED_TMP_EXISTS]) {
// No pre-existing storage artifacts: only verify startup does not throw.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
else {
if (shouldRecover) {
// Recoverable state: start and verify the dirs were repaired as expected.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkResultDataNode(baseDirs,curAfterRecover,prevAfterRecover);
}
else {
// Unrecoverable state: startup is attempted but the DN must not come up.
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
}
}
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
* This test iterates over the testCases table for block pool storage and
* attempts to startup the DataNode normally.
*/
@Test public void testBlockPoolStorageStates() throws Exception {
String[] baseDirs;
String bpid=UpgradeUtilities.getCurrentBlockPoolID(null);
for (int numDirs=1; numDirs <= 2; numDirs++) {
conf=new HdfsConfiguration();
conf.setInt("dfs.datanode.scan.period.hours",-1);
conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
for (int i=0; i < NUM_DN_TEST_CASES; i++) {
boolean[] testCase=testCases[i];
boolean shouldRecover=testCase[SHOULD_RECOVER];
boolean curAfterRecover=testCase[CURRENT_SHOULD_EXIST_AFTER_RECOVER];
boolean prevAfterRecover=testCase[PREVIOUS_SHOULD_EXIST_AFTER_RECOVER];
log("BLOCK_POOL recovery",numDirs,i,testCase);
createNameNodeStorageState(new boolean[]{true,true,false,false,false});
cluster=createCluster(conf);
baseDirs=createBlockPoolStorageState(bpid,testCase);
if (!testCase[CURRENT_EXISTS] && !testCase[PREVIOUS_EXISTS] && !testCase[PREVIOUS_TMP_EXISTS]&& !testCase[REMOVED_TMP_EXISTS]) {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
}
else {
if (shouldRecover) {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
checkResultBlockPool(baseDirs,curAfterRecover,prevAfterRecover);
}
else {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
}
}
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test upgrade from 0.22 image with corrupt md5, make sure it
* fails to upgrade
*/
@Test public void testUpgradeFromCorruptRel22Image() throws IOException {
unpackStorage(HADOOP22_IMAGE,HADOOP_DFS_DIR_TXT);
File baseDir=new File(MiniDFSCluster.getBaseDirectory());
FSImageTestUtil.corruptVersionFile(new File(baseDir,"name1/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222");
FSImageTestUtil.corruptVersionFile(new File(baseDir,"name2/current/VERSION"),"imageMD5Digest","22222222222222222222222222222222");
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
try {
upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).numDataNodes(4));
fail("Upgrade did not fail with bad MD5");
}
catch ( IOException ioe) {
String msg=StringUtils.stringifyException(ioe);
if (!msg.contains("Failed to load an FSImage file")) {
throw ioe;
}
int md5failures=appender.countExceptionsWithMessage(" is corrupt with MD5 checksum of ");
assertEquals("Upgrade did not fail with bad MD5",1,md5failures);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * DFSUtil.getPassword must resolve each HTTPS password alias through the
 * configured credential provider, and return null for an unknown alias.
 */
@Test public void testGetPassword() throws Exception {
  File testDir = new File(System.getProperty("test.build.data", "target/test-dir"));
  Configuration conf = new Configuration();
  final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
  // Start from a clean keystore file.
  File file = new File(testDir, "test.jks");
  file.delete();
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
  char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
  char[] trustpass = {'t', 'r', 'u', 's', 't', 'p', 'a', 's', 's'};
  // Nothing is stored yet under any of the three aliases.
  assertEquals(null, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  assertEquals(null, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
  assertEquals(null, provider.getCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
  // Store the three credentials. The former catch-print-rethrow wrapper was
  // removed: it only duplicated the stack trace JUnit already reports.
  provider.createCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY, keypass);
  provider.createCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY, storepass);
  provider.createCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY, trustpass);
  provider.flush();
  // The raw entries round-trip through the provider.
  assertArrayEquals(keypass, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY).getCredential());
  assertArrayEquals(storepass, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY).getCredential());
  assertArrayEquals(trustpass, provider.getCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
  // DFSUtil.getPassword resolves each alias via the configured provider.
  Assert.assertEquals("keypass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  Assert.assertEquals("storepass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
  Assert.assertEquals("trustpass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
  // Unknown aliases resolve to null rather than throwing.
  Assert.assertEquals(null, DFSUtil.getPassword(conf, "invalid-alias"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Ensure that fs.defaultFS is derived from the NameNode RPC address even when
 * neither HA nor Federation is enabled (no nameservice or namenode ID).
 * Regression test for HDFS-3351.
 */
@Test public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
  // Sanity check: the default FS is not already the expected value.
  assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
  // No nameservice ID and no namenode ID configured.
  NameNode.initializeGenericKeys(conf, null, null);
  assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
}
InternalCallVerifier BooleanVerifier
/**
* Test constructing LocatedBlock with null cachedLocs
*/
@Test public void testLocatedBlockConstructorWithNullCachedLocs(){
DatanodeInfo d=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] ds=new DatanodeInfo[1];
ds[0]=d;
ExtendedBlock b1=new ExtendedBlock("bpid",1,1,1);
LocatedBlock l1=new LocatedBlock(b1,ds,null,null,0,false,null);
final DatanodeInfo[] cachedLocs=l1.getCachedLocations();
assertTrue(cachedLocs.length == 0);
}
InternalCallVerifier EqualityVerifier
/**
* Regression test for HDFS-2934.
*/
@Test public void testSomeConfsNNSpecificSomeNSSpecific(){
final HdfsConfiguration conf=new HdfsConfiguration();
String key=DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
conf.set(key,"global-default");
conf.set(key + ".ns1","ns1-override");
conf.set(key + ".ns1.nn1","nn1-override");
Configuration newConf=new Configuration(conf);
NameNode.initializeGenericKeys(newConf,"ns2","nn1");
assertEquals("global-default",newConf.get(key));
newConf=new Configuration(conf);
NameNode.initializeGenericKeys(newConf,"ns2",null);
assertEquals("global-default",newConf.get(key));
newConf=new Configuration(conf);
NameNode.initializeGenericKeys(newConf,"ns1","nn2");
assertEquals("ns1-override",newConf.get(key));
newConf=new Configuration(conf);
NameNode.initializeGenericKeys(newConf,"ns1","nn1");
assertEquals("nn1-override",newConf.get(key));
}
InternalCallVerifier EqualityVerifier
/**
 * DFSUtil.getSpnegoKeytabKey: the fallback key is returned when the SPNEGO
 * keytab config is unset or empty; otherwise the dedicated config key wins.
 */
@Test(timeout=5000) public void testGetSpnegoKeytabKey() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final String fallbackKey = "default.spengo.key";
  // Unset: the caller-supplied default key is returned.
  conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
  assertEquals("Test spnego key in config is null", fallbackKey,
      DFSUtil.getSpnegoKeytabKey(conf, fallbackKey));
  // Empty string: treated the same as unset.
  conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
  assertEquals("Test spnego key is empty", fallbackKey,
      DFSUtil.getSpnegoKeytabKey(conf, fallbackKey));
  // Non-empty: the SPNEGO config key itself is returned.
  conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "spengo.key");
  assertEquals("Test spnego key is NOT null",
      DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
      DFSUtil.getSpnegoKeytabKey(conf, fallbackKey));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests {@code DFSUtil.getNameServiceUris()}: URIs are derived from HA
 * nameservice ids, per-nameservice service addresses, the default RPC
 * address, and fs.defaultFS; non-HDFS defaults and entries that duplicate
 * an existing nameservice are dropped.
 */
@Test public void testGetNNUris() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
final String NS1_NN1_ADDR="ns1-nn1.example.com:8020";
final String NS1_NN2_ADDR="ns1-nn2.example.com:8020";
final String NS2_NN_ADDR="ns2-nn.example.com:8020";
final String NN1_ADDR="nn.example.com:8020";
final String NN1_SRVC_ADDR="nn.example.com:8021";
final String NN2_ADDR="nn2.example.com:8020";
// ns1 is HA with two namenodes; ns2 has a single service address.
conf.set(DFS_NAMESERVICES,"ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn1"),NS1_NN1_ADDR);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn2"),NS1_NN2_ADDR);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"ns2"),NS2_NN_ADDR);
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,"hdfs://" + NN1_ADDR);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://" + NN2_ADDR);
// Restored element type: the raw Collection hid that these are URIs.
Collection<URI> uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(4,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
// A non-HDFS default filesystem must not contribute a URI.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"viewfs://vfs-name.example.com");
uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
// A default FS that duplicates an existing nameservice adds nothing new.
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://ns1");
uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(3,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
// With only default addresses configured, the service RPC address wins.
conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://" + NN1_ADDR);
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY,NN1_ADDR);
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,NN1_SRVC_ADDR);
uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(1,uris.size());
assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
}
InternalCallVerifier EqualityVerifier
/**
 * Test to ensure nameservice specific keys in the configuration are
 * copied to generic keys when the namenode starts (federation + HA case,
 * i.e. both a nameservice id and a namenode id are present).
 */
@Test public void testConfModificationFederationAndHa(){
final HdfsConfiguration conf=new HdfsConfiguration();
final String nameserviceId="ns1";
final String namenodeId="nn1";
conf.set(DFS_NAMESERVICES,nameserviceId);
conf.set(DFS_NAMESERVICE_ID,nameserviceId);
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nameserviceId,namenodeId);
// Seed every NN-specific key with a fully-suffixed variant whose value
// is the key name itself, so the copy is easy to assert on.
for (final String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
conf.set(DFSUtil.addKeySuffixes(key,nameserviceId,namenodeId),key);
}
NameNode.initializeGenericKeys(conf,nameserviceId,namenodeId);
// After initialization each generic key must carry the suffixed value.
for (final String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key,conf.get(key));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests to ensure the default namenode (taken from fs.defaultFS) is used
 * as fallback when no federated nameservices are configured.
 */
@Test public void testDefaultNamenode() throws IOException {
HdfsConfiguration conf=new HdfsConfiguration();
final String hdfs_default="hdfs://localhost:9999/";
conf.set(FS_DEFAULT_NAME_KEY,hdfs_default);
// Restored generics (the source had the invalid, stripped form "Map>"):
// nameservice id -> (namenode id -> rpc address). With no nameservices
// configured both keys are null, representing the lone default namenode.
Map<String, Map<String, InetSocketAddress>> addrMap=DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(1,addrMap.size());
Map<String, InetSocketAddress> defaultNsMap=addrMap.get(null);
assertEquals(1,defaultNsMap.size());
// The port must come from the fs.defaultFS URI set above.
assertEquals(9999,defaultNsMap.get(null).getPort());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * {@code DFSUtil.getOnlyNameServiceIdOrNull()} must return the id only
 * when exactly one nameservice is configured, and null otherwise.
 */
@Test public void testGetOnlyNameServiceIdOrNull(){
final HdfsConfiguration conf=new HdfsConfiguration();
// Two nameservices -> ambiguous, expect null.
conf.set(DFS_NAMESERVICES,"ns1,ns2");
assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
// Empty value -> nothing configured, expect null.
conf.set(DFS_NAMESERVICES,"");
assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
// Exactly one -> that id is returned.
conf.set(DFS_NAMESERVICES,"ns1");
assertEquals("ns1",DFSUtil.getOnlyNameServiceIdOrNull(conf));
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises {@code DFSUtil.getNamenodeServiceAddr()}: while no dedicated
 * service RPC address is configured the regular RPC address is returned;
 * once service addresses are set they take precedence. Also checks that an
 * unknown nameservice yields null and that the nameservice id resolves.
 */
@Test public void getNameNodeServiceAddr() throws IOException {
HdfsConfiguration conf=new HdfsConfiguration();
// NOTE(review): NS1_NN1_HOST equals NS1_NN2_HOST and NS1_NN1_HOST_SVC
// equals NS1_NN2_HOST_SVC, so the nn1/nn2 assertions below cannot actually
// distinguish the two namenodes. This looks like a copy/paste slip —
// confirm intent before changing the constants to distinct values.
final String NS1_NN1_HOST="ns1-nn1.example.com:8020";
final String NS1_NN1_HOST_SVC="ns1-nn2.example.com:8021";
final String NS1_NN2_HOST="ns1-nn1.example.com:8020";
final String NS1_NN2_HOST_SVC="ns1-nn2.example.com:8021";
conf.set(DFS_NAMESERVICES,"ns1");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn1"),NS1_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn2"),NS1_NN2_HOST);
// No service RPC addresses configured yet: the RPC addresses are returned.
assertEquals(NS1_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,null,"nn1"));
assertEquals(NS1_NN2_HOST,DFSUtil.getNamenodeServiceAddr(conf,null,"nn2"));
assertEquals(NS1_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","nn1"));
// Unknown nameservice -> null.
assertEquals(null,DFSUtil.getNamenodeServiceAddr(conf,"invalid","nn1"));
// Once dedicated service RPC addresses exist, they take precedence.
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"ns1","nn1"),NS1_NN1_HOST_SVC);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,"ns1","nn2"),NS1_NN2_HOST_SVC);
assertEquals(NS1_NN1_HOST_SVC,DFSUtil.getNamenodeServiceAddr(conf,null,"nn1"));
assertEquals(NS1_NN2_HOST_SVC,DFSUtil.getNamenodeServiceAddr(conf,null,"nn2"));
// The single configured nameservice id is resolvable both ways.
assertEquals("ns1",DFSUtil.getNamenodeNameServiceId(conf));
assertEquals("ns1",DFSUtil.getSecondaryNameServiceId(conf));
}
InternalCallVerifier EqualityVerifier
/**
 * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
 * the nameserviceId set via DFS_NAMESERVICE_ID is what gets returned.
 */
@Test public void getNameServiceId(){
final HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMESERVICE_ID,"nn1");
assertEquals("nn1",DFSUtil.getNamenodeNameServiceId(conf));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test {@link DFSUtil#getNameServiceIds(Configuration)}: the configured
 * nameservice ids are returned, preserving declaration order.
 */
@Test public void testGetNameServiceIds(){
HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMESERVICES,"nn1,nn2");
// Restored generics: the raw Collection/Iterator forced the redundant
// toString() calls below.
Collection<String> nameserviceIds=DFSUtil.getNameServiceIds(conf);
Iterator<String> it=nameserviceIds.iterator();
assertEquals(2,nameserviceIds.size());
// Order must match the comma-separated configuration value.
assertEquals("nn1",it.next());
assertEquals("nn2",it.next());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * HA + federation: verifies the HA RPC address map, HA detection per
 * nameservice, service-address lookup, and nameservice URI generation for
 * two HA nameservices.
 */
@Test public void testHANameNodesWithFederation() throws URISyntaxException {
HdfsConfiguration conf=new HdfsConfiguration();
final String NS1_NN1_HOST="ns1-nn1.example.com:8020";
final String NS1_NN2_HOST="ns1-nn2.example.com:8020";
final String NS2_NN1_HOST="ns2-nn1.example.com:8020";
final String NS2_NN2_HOST="ns2-nn2.example.com:8020";
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://ns1");
// Two federated nameservices, each with two HA namenodes.
conf.set(DFS_NAMESERVICES,"ns1,ns2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"ns1-nn1,ns1-nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX,"ns2"),"ns2-nn1,ns2-nn2");
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","ns1-nn1"),NS1_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","ns1-nn2"),NS1_NN2_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns2","ns2-nn1"),NS2_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"ns2","ns2-nn2"),NS2_NN2_HOST);
// Restored generics (source had the invalid stripped form "Map>"):
// nameservice id -> (namenode id -> rpc address).
Map<String, Map<String, InetSocketAddress>> map=DFSUtil.getHaNnRpcAddresses(conf);
assertTrue(HAUtil.isHAEnabled(conf,"ns1"));
assertTrue(HAUtil.isHAEnabled(conf,"ns2"));
assertFalse(HAUtil.isHAEnabled(conf,"ns3"));
assertEquals(NS1_NN1_HOST,map.get("ns1").get("ns1-nn1").toString());
assertEquals(NS1_NN2_HOST,map.get("ns1").get("ns1-nn2").toString());
assertEquals(NS2_NN1_HOST,map.get("ns2").get("ns2-nn1").toString());
assertEquals(NS2_NN2_HOST,map.get("ns2").get("ns2-nn2").toString());
assertEquals(NS1_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","ns1-nn1"));
assertEquals(NS1_NN2_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns1","ns1-nn2"));
assertEquals(NS2_NN1_HOST,DFSUtil.getNamenodeServiceAddr(conf,"ns2","ns2-nn1"));
// Without a nameservice id the lookup is ambiguous -> null.
assertEquals(null,DFSUtil.getNamenodeServiceAddr(conf,null,"ns1-nn1"));
assertEquals(null,DFSUtil.getNamenodeNameServiceId(conf));
assertEquals(null,DFSUtil.getSecondaryNameServiceId(conf));
// Each HA nameservice contributes exactly one logical URI.
Collection<URI> uris=DFSUtil.getNameServiceUris(conf,DFS_NAMENODE_RPC_ADDRESS_KEY);
assertEquals(2,uris.size());
assertTrue(uris.contains(new URI("hdfs://ns1")));
assertTrue(uris.contains(new URI("hdfs://ns2")));
}
APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier
/**
 * An fs.defaultFS pointing at a loopback IP must be reverse-resolved: the
 * generated nameservice URI must not expose the raw 127.0.0.1 address.
 */
@Test(timeout=15000) public void testLocalhostReverseLookup(){
// Reverse lookup of 127.0.0.1 does not work reliably on Windows; skip.
Assume.assumeTrue(!Shell.WINDOWS);
HdfsConfiguration conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"hdfs://127.0.0.1:8020");
// Restored element type: iterating a raw Collection with a URI loop
// variable does not compile.
Collection<URI> uris=DFSUtil.getNameServiceUris(conf);
assertEquals(1,uris.size());
for ( URI uri : uris) {
assertThat(uri.getHost(),not("127.0.0.1"));
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test to ensure nameservice specific keys in the configuration are
 * copied to generic keys when the namenode starts (federation only,
 * no HA — the namenode id is null).
 */
@Test public void testConfModificationFederationOnly(){
final HdfsConfiguration conf=new HdfsConfiguration();
final String nameserviceId="ns1";
conf.set(DFS_NAMESERVICES,nameserviceId);
conf.set(DFS_NAMESERVICE_ID,nameserviceId);
// Seed each NN-specific key with a nameservice-suffixed variant whose
// value is the key name itself, so the copy is easy to assert on.
for (final String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
conf.set(DFSUtil.addKeySuffixes(key,nameserviceId),key);
}
NameNode.initializeGenericKeys(conf,nameserviceId,null);
// Each generic key must now resolve to the suffixed value.
for (final String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
assertEquals(key,conf.get(key));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)} and for
 * resolving a nameservice id from an address in a federated (non-HA)
 * two-namenode setup.
 */
@Test public void testMultipleNamenodes() throws IOException {
HdfsConfiguration conf=new HdfsConfiguration();
conf.set(DFS_NAMESERVICES,"nn1,nn2");
final String NN1_ADDRESS="localhost:9000";
final String NN2_ADDRESS="localhost:9001";
final String NN3_ADDRESS="localhost:9002";
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn1"),NN1_ADDRESS);
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,"nn2"),NN2_ADDRESS);
// Restored generics (source had the invalid stripped form "Map>"):
// nameservice id -> (namenode id -> rpc address); non-HA entries are
// keyed by a null namenode id.
Map<String, Map<String, InetSocketAddress>> nnMap=DFSUtil.getNNServiceRpcAddresses(conf);
assertEquals(2,nnMap.size());
Map<String, InetSocketAddress> nn1Map=nnMap.get("nn1");
assertEquals(1,nn1Map.size());
InetSocketAddress addr=nn1Map.get(null);
assertEquals("localhost",addr.getHostName());
assertEquals(9000,addr.getPort());
Map<String, InetSocketAddress> nn2Map=nnMap.get("nn2");
assertEquals(1,nn2Map.size());
addr=nn2Map.get(null);
assertEquals("localhost",addr.getHostName());
assertEquals(9001,addr.getPort());
// Known addresses resolve to their nameservice; an unknown one to null.
checkNameServiceId(conf,NN1_ADDRESS,"nn1");
checkNameServiceId(conf,NN2_ADDRESS,"nn2");
checkNameServiceId(conf,NN3_ADDRESS,null);
// No HA namenodes were configured for either nameservice.
assertFalse(HAUtil.isHAEnabled(conf,"nn1"));
assertFalse(HAUtil.isHAEnabled(conf,"nn2"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the client respects its keepalive timeout: once
 * DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC has elapsed, the cached peer must be
 * expired and evicted from the client's PeerCache.
 */
@Test(timeout=30000) public void testClientResponsesKeepAliveTimeout() throws Exception {
Configuration clientConf=new Configuration(conf);
// Very short client-side expiry so the test can observe eviction quickly.
final long CLIENT_EXPIRY_MS=10L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT,"testClientResponsesKeepAliveTimeout");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
PeerCache peerCache=ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs,TEST_FILE,1L,(short)1,0L);
// Nothing read yet: no cached peers, no active xceivers.
assertEquals(0,peerCache.size());
assertXceiverCount(0);
DFSTestUtil.readFile(fs,TEST_FILE);
// The read leaves one cached peer and one xceiver.
assertEquals(1,peerCache.size());
assertXceiverCount(1);
// Wait past the client-side expiry; the cached peer must be gone.
Thread.sleep(CLIENT_EXPIRY_MS + 1);
Peer peer=peerCache.get(dn.getDatanodeId(),false);
// assertNull replaces assertTrue(peer == null) for a clearer failure.
assertNull("cached peer should have expired",peer);
assertEquals(0,peerCache.size());
}
InternalCallVerifier BooleanVerifier
/**
 * Test for the case where the client begins to read a long block, but
 * doesn't read bytes off the stream quickly. The datanode should time out
 * sending the chunks and the transceiver should die, even if it has a long
 * keepalive.
 */
@Test(timeout=300000) public void testSlowReader() throws Exception {
final long CLIENT_EXPIRY_MS=600000L;
Configuration clientConf=new Configuration(conf);
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT,"testSlowReader");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
// Restart the datanode with a short write timeout but a long keepalive,
// so only the write timeout can kill the transceiver.
DataNodeProperties props=cluster.stopDataNode(0);
props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,WRITE_TIMEOUT);
props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,120000);
assertTrue(cluster.restartDataNode(props,true));
dn=cluster.getDataNodes().get(0);
cluster.triggerHeartbeats();
DFSTestUtil.createFile(fs,TEST_FILE,1024 * 1024 * 8L,(short)1,0L);
FSDataInputStream stm=fs.open(TEST_FILE);
// Read a single byte and then stall; the datanode should eventually give up.
stm.read();
assertXceiverCount(1);
// Typed Supplier<Boolean> restored (was a raw Supplier) and @Override added.
GenericTestUtils.waitFor(new Supplier<Boolean>(){
@Override public Boolean get(){
return getXceiverCountWithoutServer() == 0;
}
}
,500,50000);
IOUtils.closeStream(stm);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Regression test for HDFS-3357. Check that the datanode is respecting
 * its configured keepalive timeout.
 */
@Test(timeout=30000) public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
Configuration clientConf=new Configuration(conf);
// Client-side expiry is long so only the datanode-side keepalive matters.
final long CLIENT_EXPIRY_MS=60000L;
clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,CLIENT_EXPIRY_MS);
clientConf.set(DFS_CLIENT_CONTEXT,"testDatanodeRespectsKeepAliveTimeout");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
PeerCache peerCache=ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs,TEST_FILE,1L,(short)1,0L);
// Nothing read yet: no cached peers, no active xceivers.
assertEquals(0,peerCache.size());
assertXceiverCount(0);
DFSTestUtil.readFile(fs,TEST_FILE);
// One cached peer and one xceiver right after the read.
assertEquals(1,peerCache.size());
assertXceiverCount(1);
// Sleep past the datanode's default keepalive: the datanode closes its
// end (xceiver count drops to 0) while the client cache still holds the
// now-stale peer.
Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1);
assertXceiverCount(0);
assertEquals(1,peerCache.size());
// The stale peer is still handed out by the cache, but reading from it
// yields EOF because the remote side is closed.
Peer peer=peerCache.get(dn.getDatanodeId(),false);
assertNotNull(peer);
assertEquals(-1,peer.getInputStream().read());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Opens several streams against the same file so multiple peers land in
 * the cache, waits for the datanode side to close them, and then verifies
 * a subsequent read still succeeds despite the stale cached sockets.
 */
@Test(timeout=30000) public void testManyClosedSocketsInCache() throws Exception {
Configuration clientConf=new Configuration(conf);
clientConf.set(DFS_CLIENT_CONTEXT,"testManyClosedSocketsInCache");
DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(),clientConf);
PeerCache peerCache=ClientContext.getFromConf(clientConf).getPeerCache();
DFSTestUtil.createFile(fs,TEST_FILE,1L,(short)1,0L);
final InputStream[] streams=new InputStream[5];
try {
for (int i=0; i < streams.length; i++) {
streams[i]=fs.open(TEST_FILE);
}
// Drain every stream fully so each socket gets cached on close.
for (InputStream in : streams) {
IOUtils.copyBytes(in,new NullOutputStream(),1024);
}
}
finally {
IOUtils.cleanup(null,streams);
}
assertEquals(5,peerCache.size());
// Give the datanode time to tear down its side of the connections.
Thread.sleep(1500);
assertXceiverCount(0);
// The cache still holds the (now dead) peers...
assertEquals(5,peerCache.size());
// ...but a fresh read must cope with them and succeed anyway.
DFSTestUtil.readFile(fs,TEST_FILE);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips a PacketHeader through both the stream-based and the
 * ByteBuffer-based deserialization paths, then exercises sanityCheck().
 */
@Test public void testPacketHeader() throws IOException {
final PacketHeader original=new PacketHeader(4,1024,100,false,4096,false);
// Serialize once and reuse the bytes for both read paths.
final ByteArrayOutputStream serialized=new ByteArrayOutputStream();
original.write(new DataOutputStream(serialized));
final byte[] bytes=serialized.toByteArray();
// Path 1: DataInputStream.
PacketHeader roundTripped=new PacketHeader();
roundTripped.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
assertEquals(original,roundTripped);
// Path 2: ByteBuffer.
roundTripped=new PacketHeader();
roundTripped.readFields(ByteBuffer.wrap(bytes));
assertEquals(original,roundTripped);
// sanityCheck boundary: 99 is accepted, 100 is rejected (presumably
// relative to the header's sequence number of 100 — see constructor args).
assertTrue(original.sanityCheck(99));
assertFalse(original.sanityCheck(100));
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies a block is never scanned twice: after each new file's block is
 * verified, the recorded scan time of the previously created file's block
 * must be unchanged, and a datanode restart must not trigger a rescan.
 */
@Test public void testDuplicateScans() throws Exception {
long startTime=Time.monotonicNow();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
FileSystem fs=null;
try {
fs=cluster.getFileSystem();
DataNode dataNode=cluster.getDataNodes().get(0);
int infoPort=dataNode.getInfoPort();
long scanTimeBefore=0, scanTimeAfter=0;
// Create files one at a time; each iteration checks that the previous
// file's block scan time has not moved (i.e. no duplicate scan happened).
for (int i=1; i < 10; i++) {
Path fileName=new Path("/test" + i);
DFSTestUtil.createFile(fs,fileName,1024,(short)1,1000L);
waitForVerification(infoPort,fs,fileName,i,startTime,TIMEOUT);
if (i > 1) {
scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (i - 1))));
assertFalse("scan time shoud not be 0",scanTimeAfter == 0);
assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
}
scanTimeBefore=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + i)));
}
// Restarting the datanode must not rescan an already-verified block;
// the sleep gives the restarted scanner time to (wrongly) rescan if it
// were going to.
cluster.restartDataNode(0);
Thread.sleep(10000);
dataNode=cluster.getDataNodes().get(0);
scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (9))));
assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test if NameNode handles truncated blocks in block report: a replica is
 * shortened on disk between restarts; the namenode must detect the corrupt
 * replica, re-replicate from the good copy, and delete the bad one.
 */
@Test public void testTruncatedBlockReport() throws Exception {
final Configuration conf=new HdfsConfiguration();
final short REPLICATION_FACTOR=(short)2;
final Path fileName=new Path("/file1");
// Aggressive intervals so block reports / replication happen quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,3L);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,3);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,3L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,false);
long startTime=Time.monotonicNow();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
cluster.waitActive();
ExtendedBlock block;
// Phase 1: create a replicated file and remember its first block.
try {
FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,fileName,1,REPLICATION_FACTOR,0);
DFSTestUtil.waitReplication(fs,fileName,REPLICATION_FACTOR);
block=DFSTestUtil.getFirstBlock(fs,fileName);
}
finally {
cluster.shutdown();
}
// Phase 2: restart on the same storage (format=false), wait for the block
// to be verified, then shorten the replica on datanode 0 by one byte.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).format(false).build();
cluster.waitActive();
try {
FileSystem fs=cluster.getFileSystem();
int infoPort=cluster.getDataNodes().get(0).getInfoPort();
assertTrue(waitForVerification(infoPort,fs,fileName,1,startTime,TIMEOUT) >= startTime);
if (!changeReplicaLength(block,0,-1)) {
throw new IOException("failed to find or change length of replica on node 0 " + cluster.getDataNodes().get(0).getDisplayName());
}
}
finally {
cluster.shutdown();
}
// Phase 3: restart with one extra datanode; the truncated replica must be
// detected via the block report, the block re-replicated to the required
// factor, and the bad replica on node 0 deleted.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).format(false).build();
cluster.startDataNodes(conf,1,true,null,null);
cluster.waitActive();
cluster.waitClusterUp();
assertFalse("failed to leave safe mode",cluster.getNameNode().isInSafeMode());
try {
DFSTestUtil.waitReplication(cluster.getFileSystem(),fileName,REPLICATION_FACTOR);
waitForBlockDeleted(block,0,TIMEOUT);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * End-to-end check of the datanode block scanner: a block written before a
 * restart is verified after the restart, and a newly written and fully
 * read block gets verified as well.
 */
@Test public void testDatanodeBlockScanner() throws IOException, TimeoutException {
long startTime=Time.monotonicNow();
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path file1=new Path("/tmp/testBlockVerification/file1");
Path file2=new Path("/tmp/testBlockVerification/file2");
DFSTestUtil.createFile(fs,file1,10,(short)1,0);
// Restart on the same storage so file1's existing block is present when
// the scanner starts up.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false).build();
cluster.waitActive();
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
fs=cluster.getFileSystem();
DatanodeInfo dn=dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
// file1's verification time must be no earlier than the test's start.
assertTrue(waitForVerification(dn.getInfoPort(),fs,file1,1,startTime,TIMEOUT) >= startTime);
// Write file2 and read it to completion; it should be verified as well.
DFSTestUtil.createFile(fs,file2,10,(short)1,0);
IOUtils.copyBytes(fs.open(file2),new IOUtils.NullOutputStream(),conf,true);
assertTrue(waitForVerification(dn.getInfoPort(),fs,file2,2,startTime,TIMEOUT) >= startTime);
cluster.shutdown();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Block corruption handling policy: with one corrupt replica out of three
 * the block is not reported as corrupt (it can be re-replicated), but once
 * every replica is corrupt the block must be flagged corrupt.
 */
@Test public void testBlockCorruptionPolicy() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
Random random=new Random();
FileSystem fs=null;
// Pick one of the three replicas at random to corrupt first.
int rand=random.nextInt(3);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path file1=new Path("/tmp/testBlockVerification/file1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,0);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
DFSTestUtil.waitReplication(fs,file1,(short)3);
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
// Corrupt a single replica and restart that datanode; replication drops
// to 2 good copies, yet the block is still not fully corrupt.
assertTrue(MiniDFSCluster.corruptReplica(rand,block));
cluster.restartDataNode(rand);
DFSTestUtil.waitReplication(fs,file1,(short)2);
assertFalse(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
// Corrupt all three replicas and force a block scan on every datanode.
assertTrue(MiniDFSCluster.corruptReplica(0,block));
assertTrue(MiniDFSCluster.corruptReplica(1,block));
assertTrue(MiniDFSCluster.corruptReplica(2,block));
for ( DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.runBlockScannerForBlock(dn,block);
}
// With every replica corrupt, the block must now be reported corrupt.
DFSTestUtil.waitReplication(fs,file1,(short)3);
assertTrue(DFSTestUtil.allBlockReplicasCorrupt(cluster,file1,0));
cluster.shutdown();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test public void testDataDirectories() throws IOException {
File dataDir=new File(BASE_DIR,"data").getCanonicalFile();
Configuration conf=cluster.getConfiguration(0);
// Case 1: bogus "shv" URI scheme — DataNode creation must fail.
String dnDir=makeURI("shv",null,fileAsURI(dataDir).getPath());
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir);
DataNode dn=null;
try {
dn=DataNode.createDataNode(new String[]{},conf);
fail();
}
catch ( Exception e) {
// Expected: the invalid URI scheme in dfs.datanode.data.dir must abort
// startup; the assertNull below verifies no DataNode was created.
}
finally {
if (dn != null) {
dn.shutdown();
}
}
assertNull("Data-node startup should have failed.",dn);
// Case 2: three valid variants — plain file URI, file URI with an
// explicit authority, and a raw absolute path.
String dnDir1=fileAsURI(dataDir).toString() + "1";
String dnDir2=makeURI("file","localhost",fileAsURI(dataDir).getPath() + "2");
String dnDir3=dataDir.getAbsolutePath() + "3";
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dnDir1 + "," + dnDir2+ ","+ dnDir3);
try {
cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
assertTrue("Data-node should startup.",cluster.isDataNodeUp());
}
finally {
if (cluster != null) {
cluster.shutdownDataNodes();
}
}
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Regression test for HDFS-894 ensures that, when datanodes
 * are restarted, the new IPC port is registered with the
 * namenode.
 */
@Test public void testChangeIpcPort() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
// Restart the datanodes; they may come back on different IPC ports.
cluster.restartDataNodes();
DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL);
long firstUpdateAfterRestart=report[0].getLastUpdate();
// Poll with an increasing backoff until the restarted datanode
// heartbeats again (lastUpdate advances past the post-restart value).
boolean gotHeartbeat=false;
for (int i=0; i < 10 && !gotHeartbeat; i++) {
try {
Thread.sleep(i * 1000);
}
catch ( InterruptedException ie) {
// Preserve the interrupt status instead of silently swallowing it.
Thread.currentThread().interrupt();
}
report=client.datanodeReport(DatanodeReportType.ALL);
gotHeartbeat=(report[0].getLastUpdate() > firstUpdateAfterRestart);
}
if (!gotHeartbeat) {
fail("Never got a heartbeat from restarted datanode.");
}
// The namenode's recorded IPC port must match the datanode's actual port.
int realIpcPort=cluster.getDataNodes().get(0).getIpcPort();
assertEquals(realIpcPort,report[0].getIpcPort());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Re-registering a datanode at the same host/ports but with a different
 * storage/datanode id must be accepted by the namenode without producing
 * a duplicate entry in the datanode report.
 */
@Test public void testChangeStorageID() throws Exception {
final String DN_IP_ADDR="127.0.0.1";
final String DN_HOSTNAME="localhost";
final int DN_XFER_PORT=12345;
final int DN_INFO_PORT=12346;
final int DN_INFO_SECURE_PORT=12347;
final int DN_IPC_PORT=12348;
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
// Start with zero real datanodes; registrations are made by hand below.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
NamenodeProtocols rpcServer=cluster.getNameNodeRpc();
DatanodeID dnId=new DatanodeID(DN_IP_ADDR,DN_HOSTNAME,"fake-datanode-id",DN_XFER_PORT,DN_INFO_PORT,DN_INFO_SECURE_PORT,DN_IPC_PORT);
// The mocked storage info must mirror the namenode's cTime and layout
// version for the registration to be accepted.
long nnCTime=cluster.getNamesystem().getFSImage().getStorage().getCTime();
StorageInfo mockStorageInfo=mock(StorageInfo.class);
doReturn(nnCTime).when(mockStorageInfo).getCTime();
doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo).getLayoutVersion();
DatanodeRegistration dnReg=new DatanodeRegistration(dnId,mockStorageInfo,null,VersionInfo.getVersion());
rpcServer.registerDatanode(dnReg);
DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL);
assertEquals("Expected a registered datanode",1,report.length);
// Re-register the same endpoint under a different datanode id; the
// report must still show a single datanode, not two.
dnId=new DatanodeID(DN_IP_ADDR,DN_HOSTNAME,"changed-fake-datanode-id",DN_XFER_PORT,DN_INFO_PORT,DN_INFO_SECURE_PORT,DN_IPC_PORT);
dnReg=new DatanodeRegistration(dnId,mockStorageInfo,null,VersionInfo.getVersion());
rpcServer.registerDatanode(dnReg);
report=client.datanodeReport(DatanodeReportType.ALL);
assertEquals("Datanode with changed storage ID not recognized",1,report.length);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Tests decommission with replicas on the target datanode cannot be migrated
* to other datanodes and satisfy the replication factor. Make sure the
* datanode won't get stuck in decommissioning state.
*/
@Test(timeout=360000) public void testDecommission2() throws IOException {
LOG.info("Starting test testDecommission");
int numNamenodes=1;
int numDatanodes=4;
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
startCluster(numNamenodes,numDatanodes,conf);
ArrayList> namenodeDecomList=new ArrayList>(numNamenodes);
namenodeDecomList.add(0,new ArrayList(numDatanodes));
Path file1=new Path("testDecommission2.dat");
int replicas=4;
ArrayList decommissionedNodes=namenodeDecomList.get(0);
FileSystem fileSys=cluster.getFileSystem(0);
FSNamesystem ns=cluster.getNamesystem(0);
writeFile(fileSys,file1,replicas);
int deadDecomissioned=ns.getNumDecomDeadDataNodes();
int liveDecomissioned=ns.getNumDecomLiveDataNodes();
DatanodeInfo decomNode=decommissionNode(0,null,decommissionedNodes,AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
assertEquals(deadDecomissioned,ns.getNumDecomDeadDataNodes());
assertEquals(liveDecomissioned + 1,ns.getNumDecomLiveDataNodes());
DFSClient client=getDfsClient(cluster.getNameNode(0),conf);
assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
assertNull(checkFile(fileSys,file1,replicas,decomNode.getXferAddr(),numDatanodes));
cleanupFile(fileSys,file1);
cluster.shutdown();
startCluster(1,4,conf);
cluster.shutdown();
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test using a "registration name" in a host include file.
 * Registration names are DataNode names specified in the configuration by
 * dfs.datanode.hostname. The DataNode will send this name to the NameNode
 * as part of its registration. Registration names are helpful when you
 * want to override the normal first result of DNS resolution on the
 * NameNode. For example, a given datanode IP may map to two hostnames,
 * and you may want to choose which hostname is used internally in the
 * cluster.
 * It is not recommended to use a registration name which is not also a
 * valid DNS hostname for the DataNode. See HDFS-5237 for background.
 */
@Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException {
Configuration hdfsConf=new Configuration(conf);
final String registrationName="127.0.0.100";
final String nonExistentDn="127.0.0.10";
hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,registrationName);
cluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).checkDataNodeHostConfig(true).setupHostsFile(true).build();
cluster.waitActive();
// Include only a host that does not exist: the real datanode (which
// registered under registrationName) must end up marked dead.
// Raw ArrayList replaced with ArrayList<String>.
ArrayList<String> nodes=new ArrayList<String>();
nodes.add(nonExistentDn);
writeConfigFile(hostsFile,nodes);
refreshNodes(cluster.getNamesystem(0),hdfsConf);
DFSClient client=getDfsClient(cluster.getNameNode(0),hdfsConf);
while (true) {
DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.DEAD);
if (info.length == 1) {
break;
}
LOG.info("Waiting for datanode to be marked dead");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
}
// Now include the registration name: after a restart the datanode must
// come back live under that name and must not be decommissioned.
int dnPort=cluster.getDataNodes().get(0).getXferPort();
nodes=new ArrayList<String>();
nodes.add(registrationName + ":" + dnPort);
writeConfigFile(hostsFile,nodes);
refreshNodes(cluster.getNamesystem(0),hdfsConf);
cluster.restartDataNode(0);
while (true) {
DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.LIVE);
if (info.length == 1) {
Assert.assertFalse(info[0].isDecommissioned());
Assert.assertFalse(info[0].isDecommissionInProgress());
assertEquals(registrationName,info[0].getHostName());
break;
}
LOG.info("Waiting for datanode to come back");
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests restart of namenode while datanode hosts are added to exclude file
 */
@Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
LOG.info("Starting test testDecommissionWithNamenodeRestart");
int numNamenodes=1;
int numDatanodes=1;
int replicas=1;
startCluster(numNamenodes,numDatanodes,conf);
Path file1=new Path("testDecommission.dat");
FileSystem fileSys=cluster.getFileSystem();
writeFile(fileSys,file1,replicas);
DFSClient client=getDfsClient(cluster.getNameNode(),conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
DatanodeID excludedDatanodeID=info[0];
String excludedDatanodeName=info[0].getXferAddr();
// Exclude the only live datanode, then add a second one so the block has
// somewhere to migrate. (Raw ArrayList replaced with ArrayList<String>.)
writeConfigFile(excludeFile,new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
cluster.startDataNodes(conf,1,true,null,null,null,null);
numDatanodes+=1;
assertEquals("Number of datanodes should be 2 ",2,cluster.getDataNodes().size());
// Restart the namenode; on startup it must re-read the exclude file and
// decommission the excluded node.
cluster.restartNameNode();
DatanodeInfo datanodeInfo=NameNodeAdapter.getDatanode(cluster.getNamesystem(),excludedDatanodeID);
waitNodeState(datanodeInfo,AdminStates.DECOMMISSIONED);
assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
// Poll until the block has been re-replicated off the excluded node.
// A success flag fixes the old off-by-one where success on the final
// (20th) attempt was still reported as a failure by "tries < 20".
boolean replicated=false;
for (int tries=0; !replicated && tries < 20; tries++) {
try {
Thread.sleep(1000);
replicated=(checkFile(fileSys,file1,replicas,datanodeInfo.getXferAddr(),numDatanodes) == null);
}
catch ( InterruptedException ie) {
// Restore the interrupt status rather than swallowing it.
Thread.currentThread().interrupt();
}
}
assertTrue("Block was not replicated after decommission",replicated);
cleanupFile(fileSys,file1);
cluster.shutdown();
// A fresh cluster must start cleanly afterwards.
startCluster(numNamenodes,numDatanodes,conf);
cluster.shutdown();
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that deprecated configuration keys are transparently mapped to
 * their current replacements when read back.
 */
@Test public void testDeprecatedKeys() throws Exception {
Configuration conf=new HdfsConfiguration();
// Value set under the old key "topology.script.file.name" must surface
// under the new key. assertEquals replaces assertTrue(x.equals(..)) for
// NPE-safety and a clearer failure message.
conf.set("topology.script.file.name","xyz");
assertEquals("xyz",conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY));
// Same for the old key "dfs.replication.interval". The unused local that
// merely read DFS_NAMENODE_REPLICATION_INTERVAL_KEY has been removed.
conf.setInt("dfs.replication.interval",1);
assertEquals(1,conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,3));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that the socket cache can be disabled by setting the capacity to
 * 0. Regression test for HDFS-3365.
 * @throws Exception
 */
@Test public void testDisableCache() throws Exception {
HdfsConfiguration cacheDisabledConf=new HdfsConfiguration();
// A zero-capacity cache means no peer may ever be retained.
cacheDisabledConf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,0);
BlockReaderTestUtil testUtil=new BlockReaderTestUtil(1,cacheDisabledConf);
final Path testFile=new Path("/testConnCache.dat");
testUtil.writeFile(testFile,FILE_SIZE / 1024);
FileSystem fs=FileSystem.newInstance(testUtil.getConf());
try {
DFSTestUtil.readFile(fs,testFile);
// After a full read, the peer cache must still be empty.
assertEquals(0,((DistributedFileSystem)fs).dfs.getClientContext().getPeerCache().size());
}
finally {
fs.close();
testUtil.shutdown();
}
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises getFileChecksum() through both the native HDFS client and
// WebHDFS (as a different user), checking error paths, equality of
// checksums for identical content, and permission enforcement.
@Test public void testFileChecksum() throws Exception {
// Log the random seed so a failure is reproducible.
final long seed=RAN.nextLong();
System.out.println("seed=" + seed);
RAN.setSeed(seed);
final Configuration conf=getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem hdfs=cluster.getFileSystem();
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final UserGroupInformation current=UserGroupInformation.getCurrentUser();
// A second, non-superuser identity for the WebHDFS accesses below.
final UserGroupInformation ugi=UserGroupInformation.createUserForTesting(current.getShortUserName() + "x",new String[]{"user"});
// Checksum of a non-existent file must raise FileNotFoundException.
try {
hdfs.getFileChecksum(new Path("/test/TestNonExistingFile"));
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue("Not throwing the intended exception message",e.getMessage().contains("File does not exist: /test/TestNonExistingFile"));
}
// Checksum of a directory must also fail (it is not a file).
try {
Path path=new Path("/test/TestExistingDir/");
hdfs.mkdirs(path);
hdfs.getFileChecksum(path);
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue("Not throwing the intended exception message",e.getMessage().contains("Path is not a file: /test/TestExistingDir"));
}
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
// Open the WebHDFS filesystem as the non-superuser.
final FileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws Exception {
return new Path(webhdfsuri).getFileSystem(conf);
}
}
);
final Path dir=new Path("/filechecksum");
final int block_size=1024;
final int buffer_size=conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,512);
// Repeat with increasing file sizes (roughly n blocks + a partial block).
for (int n=0; n < 5; n++) {
final byte[] data=new byte[RAN.nextInt(block_size / 2 - 1) + n * block_size + 1];
RAN.nextBytes(data);
System.out.println("data.length=" + data.length);
// Write "foo<n>" via plain HDFS.
final Path foo=new Path(dir,"foo" + n);
{
final FSDataOutputStream out=hdfs.create(foo,false,buffer_size,(short)2,block_size);
out.write(data);
out.close();
}
final FileChecksum hdfsfoocs=hdfs.getFileChecksum(foo);
System.out.println("hdfsfoocs=" + hdfsfoocs);
// Same file's checksum fetched through WebHDFS (relative path).
final FileChecksum webhdfsfoocs=webhdfs.getFileChecksum(foo);
System.out.println("webhdfsfoocs=" + webhdfsfoocs);
// ... and through a fully-qualified webhdfs:// path.
final Path webhdfsqualified=new Path(webhdfsuri + dir,"foo" + n);
final FileChecksum webhdfs_qfoocs=webhdfs.getFileChecksum(webhdfsqualified);
System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
// A zero-byte file has a well-known, constant MD5-of-CRC checksum.
final Path zeroByteFile=new Path(dir,"zeroByteFile" + n);
{
final FSDataOutputStream out=hdfs.create(zeroByteFile,false,buffer_size,(short)2,block_size);
out.close();
}
{
final FileChecksum zeroChecksum=hdfs.getFileChecksum(zeroByteFile);
assertEquals(zeroChecksum.toString(),"MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
}
// "bar<n>" has identical content to "foo<n>" ...
final Path bar=new Path(dir,"bar" + n);
{
final FSDataOutputStream out=hdfs.create(bar,false,buffer_size,(short)2,block_size);
out.write(data);
out.close();
}
{
// ... so every checksum variant (HDFS, WebHDFS, qualified WebHDFS)
// must be equal, including hashCode().
final FileChecksum barcs=hdfs.getFileChecksum(bar);
final int barhashcode=barcs.hashCode();
assertEquals(hdfsfoocs.hashCode(),barhashcode);
assertEquals(hdfsfoocs,barcs);
assertEquals(webhdfsfoocs.hashCode(),barhashcode);
assertEquals(webhdfsfoocs,barcs);
assertEquals(webhdfs_qfoocs.hashCode(),barhashcode);
assertEquals(webhdfs_qfoocs,barcs);
}
// Revoke all permissions on the directory: the non-superuser WebHDFS
// client must now fail to fetch a checksum.
hdfs.setPermission(dir,new FsPermission((short)0));
{
try {
webhdfs.getFileChecksum(webhdfsqualified);
fail();
}
catch ( IOException ioe) {
FileSystem.LOG.info("GOOD: getting an exception",ioe);
}
}
// Restore permissions for the next iteration.
hdfs.setPermission(dir,new FsPermission((short)0777));
}
cluster.shutdown();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates two files with different per-file checksum options (CRC32C vs.
 * CRC32) and verifies the configured type is used and that the resulting
 * file checksums differ.
 */
@Test public void testCreateWithCustomChecksum() throws Exception {
Configuration conf=getTestConfiguration();
MiniDFSCluster cluster=null;
Path testBasePath=new Path("/test/csum");
Path path1=new Path(testBasePath,"file_wtih_crc1");
Path path2=new Path(testBasePath,"file_with_crc2");
ChecksumOpt opt1=new ChecksumOpt(DataChecksum.Type.CRC32C,512);
ChecksumOpt opt2=new ChecksumOpt(DataChecksum.Type.CRC32,512);
FsPermission perm=FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
// Typed EnumSet instead of the raw type the original declared.
EnumSet<CreateFlag> flags=EnumSet.of(CreateFlag.OVERWRITE,CreateFlag.CREATE);
short repl=1;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem dfs=cluster.getFileSystem();
dfs.mkdirs(testBasePath);
// Same create parameters except for the checksum option.
FSDataOutputStream out1=dfs.create(path1,perm,flags,4096,repl,131072L,null,opt1);
FSDataOutputStream out2=dfs.create(path2,perm,flags,4096,repl,131072L,null,opt2);
for (int i=0; i < 1024; i++) {
out1.write(i);
out2.write(i);
}
out1.close();
out2.close();
MD5MD5CRC32FileChecksum sum1=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1);
MD5MD5CRC32FileChecksum sum2=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2);
// Identical content, different CRC algorithms => different checksums.
assertFalse(sum1.equals(sum2));
assertEquals(DataChecksum.Type.CRC32C,sum1.getCrcType());
assertEquals(DataChecksum.Type.CRC32,sum2.getCrcType());
}
finally {
if (cluster != null) {
cluster.getFileSystem().delete(testBasePath,true);
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests error paths for {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsError() throws Exception {
final Configuration conf=getTestConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
// Short RPC timeout (1.5s) and no retries so the injected 3s delay
// below reliably times out.
conf.setInt(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,1500);
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.getDataNodes();
final DistributedFileSystem fs=cluster.getFileSystem();
final Path tmpFile1=new Path("/errorfile1.dat");
final Path tmpFile2=new Path("/errorfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
// Wait until both 1-block files are fully replicated (2 replicas each
// => 4 reported hosts in total).
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
// Ignored: waitFor polls again until the timeout.
}
return false;
}
}
,500,30000);
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
List allLocs=Lists.newArrayList();
allLocs.addAll(Arrays.asList(blockLocs1));
allLocs.addAll(Arrays.asList(blockLocs2));
// Inject a 3s delay into getHdfsBlocksMetadata so every datanode RPC
// exceeds the 1.5s client timeout configured above.
DataNodeFaultInjector injector=Mockito.mock(DataNodeFaultInjector.class);
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(3000);
return null;
}
}
).when(injector).getHdfsBlocksMetadata();
DataNodeFaultInjector.instance=injector;
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(allLocs);
// All RPCs timed out, so no cached host info may be present.
for ( BlockStorageLocation loc : locs) {
assertEquals("Found more than 0 cached hosts although RPCs supposedly timed out",0,loc.getCachedHosts().length);
}
// Remove the fault injector, then stop one datanode: each block should
// now report exactly one valid and one null VolumeId.
DataNodeFaultInjector.instance=new DataNodeFaultInjector();
DataNodeProperties stoppedNode=cluster.stopDataNode(0);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocation for two 1-block files",2,locs.length);
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getHosts().length);
assertEquals("Expected two VolumeIDs for each block",2,l.getVolumeIds().length);
// XOR: exactly one of the two volume ids is null.
assertTrue("Expected one valid and one invalid volume",(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null));
}
// Restart the datanode, delete one file, and wait until the deletions
// have propagated to the datanodes.
cluster.restartDataNode(stoppedNode,true);
cluster.waitActive();
fs.delete(tmpFile2,true);
HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
locs=fs.getFileBlockStorageLocations(allLocs);
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
// File 1 still exists: both volume ids valid. File 2 was deleted: both
// volume ids must be null.
assertNotNull(locs[0].getVolumeIds()[0]);
assertNotNull(locs[0].getVolumeIds()[1]);
assertNull(locs[1].getVolumeIds()[0]);
assertNull(locs[1].getVolumeIds()[1]);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that isFileClosed() reflects the open/closed state of a file:
 * false while an output stream is open, true once it is closed.
 */
@Test(timeout=60000) public void testFileCloseStatus() throws IOException {
Configuration hdfsConf=new HdfsConfiguration();
MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(hdfsConf).build();
DistributedFileSystem dfs=miniCluster.getFileSystem();
try {
Path filePath=new Path("/simpleFlush.dat");
FSDataOutputStream out=dfs.create(filePath);
out.writeBytes("Some test data");
out.flush();
// Stream is still open, so the file must not report closed.
assertFalse("File status should be open",dfs.isFileClosed(filePath));
out.close();
assertTrue("File status should be closed",dfs.isFileClosed(filePath));
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises the DFSClient LeaseRenewer lifecycle: the renewer thread must
// run while any file is open for write, keep running through the grace
// sleep period, and stop once all writers are closed and the grace period
// has elapsed. Also covers open-of-missing-file and a plain write/read
// round trip by IP address.
@Test public void testDFSClient() throws Exception {
Configuration conf=getTestConfiguration();
// Short grace period so the renewer shuts down quickly in the test.
final long grace=1000L;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final String filepathstring="/test/LeaseChecker/foo";
final Path[] filepaths=new Path[4];
for (int i=0; i < filepaths.length; i++) {
filepaths[i]=new Path(filepathstring + i);
}
final long millis=Time.now();
{
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
// No files open yet: the renewer must not be running.
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
{
// Single writer: renewer runs while open, stops after close + grace.
final FSDataOutputStream out=dfs.create(filepaths[0]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out.close();
Thread.sleep(grace / 4 * 3);
// Still within the grace window: renewer has not stopped yet.
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
for (int i=0; i < 3; i++) {
if (dfs.dfs.getLeaseRenewer().isRunning()) {
Thread.sleep(grace / 2);
}
}
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
}
{
// Two concurrent writers: renewer must stay up until BOTH close.
final FSDataOutputStream out1=dfs.create(filepaths[1]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
final FSDataOutputStream out2=dfs.create(filepaths[2]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out1.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out1.close();
// One writer still open: renewer keeps running.
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out2.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out2.close();
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
}
{
// Writer held open across a grace period: renewer must not stop
// while the stream is still open.
final FSDataOutputStream out3=dfs.create(filepaths[3]);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out3.writeLong(millis);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
out3.close();
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
Thread.sleep(grace / 4 * 3);
assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
for (int i=0; i < 3; i++) {
if (dfs.dfs.getLeaseRenewer().isRunning()) {
Thread.sleep(grace / 2);
}
}
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
}
dfs.close();
}
{
// Opening a non-existent path must throw FileNotFoundException.
FileSystem fs=cluster.getFileSystem();
Path dir=new Path("/wrwelkj");
assertFalse("File should not exist for test.",fs.exists(dir));
try {
FSDataInputStream in=fs.open(dir);
try {
in.close();
fs.close();
}
finally {
// Reached only if open() did NOT throw: force a failure.
assertTrue("Did not get a FileNotFoundException for non-existing" + " file.",false);
}
}
catch ( FileNotFoundException fnf) {
// Expected path: open() of a missing file throws.
}
}
{
// Read-only access never starts the lease renewer.
final DistributedFileSystem dfs=cluster.getFileSystem();
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
FSDataInputStream in=dfs.open(filepaths[0]);
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
// The value written by the first writer block above.
assertEquals(millis,in.readLong());
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
in.close();
assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
dfs.close();
}
{
// Plain write/read round trip addressing the NN by IP.
String uri="hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file";
Path path=new Path(uri);
FileSystem fs=FileSystem.get(path.toUri(),conf);
FSDataOutputStream out=fs.create(path);
byte[] buf=new byte[1024];
out.write(buf);
out.close();
FSDataInputStream in=fs.open(path);
in.readFully(buf);
in.close();
fs.close();
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the normal path of batching up BlockLocation[]s to be passed to a
 * single {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)} call
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsBatching() throws Exception {
final Configuration conf=getTestConfiguration();
// Verbose logging to aid debugging if the batched RPC misbehaves.
((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.TRACE);
conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final DistributedFileSystem fs=cluster.getFileSystem();
final Path tmpFile1=new Path("/tmpfile1.dat");
final Path tmpFile2=new Path("/tmpfile2.dat");
DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADl);
DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADl);
// Wait until both 1-block files are fully replicated (2 replicas each
// => 4 reported hosts in total).
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
try {
List list=Lists.newArrayList();
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
int totalRepl=0;
for ( BlockLocation loc : list) {
totalRepl+=loc.getHosts().length;
}
if (totalRepl == 4) {
return true;
}
}
catch ( IOException e) {
// Ignored: waitFor polls again until the timeout.
}
return false;
}
}
,500,30000);
// Batch the locations of both files into a single storage-location call.
BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
BlockLocation[] blockLocs=(BlockLocation[])ArrayUtils.addAll(blockLocs1,blockLocs2);
BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
// Diagnostic dump of each block's volume id per datanode.
int counter=0;
for ( BlockStorageLocation l : locs) {
for (int i=0; i < l.getVolumeIds().length; i++) {
VolumeId id=l.getVolumeIds()[i];
String name=l.getNames()[i];
if (id != null) {
System.out.println("Datanode " + name + " has block "+ counter+ " on volume id "+ id.toString());
}
}
counter++;
}
assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
// Every replica of every block must report a non-null VolumeId.
for ( BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block",2,l.getVolumeIds().length);
for (int i=0; i < l.getVolumeIds().length; i++) {
VolumeId id=l.getVolumeIds()[i];
String name=l.getNames()[i];
assertTrue("Expected block to be valid on datanode " + name,id != null);
}
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a long-lived client can keep reading over an encrypted
 * channel even after the data-encryption keys have expired and been
 * rolled by the block token secret manager.
 */
@Test public void testLongLivedClient() throws IOException, InterruptedException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
FileChecksum origChecksum=fileSystem.getFileChecksum(TEST_PATH);
fileSystem.close();
miniCluster.shutdown();
// Restart the same cluster (same storage dirs) with encryption on.
setEncryptionConfigKeys(clusterConf);
miniCluster=new MiniDFSCluster.Builder(clusterConf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
// Shrink key update interval and token lifetime to 2s so the keys
// the client first negotiated expire during the sleep below.
BlockTokenSecretManager secretManager=miniCluster.getNamesystem().getBlockManager().getBlockTokenSecretManager();
secretManager.setKeyUpdateIntervalForTesting(2 * 1000);
secretManager.setTokenLifetime(2 * 1000);
secretManager.clearAllKeysForTesting();
fileSystem=getFileSystem(clusterConf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
LOG.info("Sleeping so that encryption keys expire...");
Thread.sleep(15 * 1000);
LOG.info("Done sleeping.");
// Reads must still succeed with the same content after key expiry.
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that an already-open read client keeps working over an
 * encrypted channel after both the NameNode and a DataNode restart.
 */
@Test public void testLongLivedReadClientAfterRestart() throws IOException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
FileChecksum origChecksum=fileSystem.getFileChecksum(TEST_PATH);
fileSystem.close();
miniCluster.shutdown();
// Restart the same cluster (same storage dirs) with encryption on.
setEncryptionConfigKeys(clusterConf);
miniCluster=new MiniDFSCluster.Builder(clusterConf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fileSystem=getFileSystem(clusterConf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
// Restart both daemons while the client stays open; reads must
// continue to return identical data.
miniCluster.restartNameNode();
assertTrue(miniCluster.restartDataNode(0));
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Writes a file with encryption disabled, then restarts the cluster with
 * wire encryption enabled and verifies the same data reads back intact.
 */
@Test public void testEncryptedRead() throws IOException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
FileChecksum origChecksum=fileSystem.getFileChecksum(TEST_PATH);
fileSystem.close();
miniCluster.shutdown();
// Restart the same cluster (same storage dirs) with encryption on.
setEncryptionConfigKeys(clusterConf);
miniCluster=new MiniDFSCluster.Builder(clusterConf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fileSystem=getFileSystem(clusterConf);
// Content and checksum must match what was written pre-encryption.
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Forces an append that requires a pipeline recovery with block transfer
 * (by shutting down one replica's datanode) and verifies the appended
 * data is readable over the encrypted channel.
 */
@Test public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
setEncryptionConfigKeys(clusterConf);
// Four datanodes so a replacement node exists after one is killed.
miniCluster=new MiniDFSCluster.Builder(clusterConf).numDataNodes(4).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
FSDataInputStream input=fileSystem.open(TEST_PATH);
List blocks=DFSTestUtil.getAllBlocks(input);
input.close();
assertEquals(1,blocks.size());
assertEquals(3,blocks.get(0).getLocations().length);
// Kill the datanode holding the first replica to force the append
// pipeline to transfer the block to a new node.
DataNode victim=miniCluster.getDataNode(blocks.get(0).getLocations()[0].getIpcPort());
victim.shutdown();
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT + PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies encrypted reads still succeed with a freshly-created client
 * after the NameNode has been restarted.
 */
@Test public void testEncryptedReadAfterNameNodeRestart() throws IOException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
FileChecksum origChecksum=fileSystem.getFileChecksum(TEST_PATH);
fileSystem.close();
miniCluster.shutdown();
// Restart the same cluster (same storage dirs) with encryption on.
setEncryptionConfigKeys(clusterConf);
miniCluster=new MiniDFSCluster.Builder(clusterConf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fileSystem=getFileSystem(clusterConf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
fileSystem.close();
// Restart the NN and open a brand-new client: reads must still work.
miniCluster.restartNameNode();
fileSystem=getFileSystem(clusterConf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that an already-open write client can keep appending over an
 * encrypted channel after the NameNode and all DataNodes restart.
 */
@Test public void testLongLivedWriteClientAfterRestart() throws IOException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
setEncryptionConfigKeys(clusterConf);
miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
// Restart every daemon while the client stays open.
miniCluster.restartNameNode();
assertTrue(miniCluster.restartDataNodes());
miniCluster.waitActive();
// A second write through the surviving client must append cleanly.
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT + PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Same as the basic encrypted-read test, but with the data-encryption
 * algorithm explicitly set to RC4 instead of the default.
 */
@Test public void testEncryptedReadWithRC4() throws IOException {
MiniDFSCluster miniCluster=null;
try {
Configuration clusterConf=new Configuration();
miniCluster=new MiniDFSCluster.Builder(clusterConf).build();
FileSystem fileSystem=getFileSystem(clusterConf);
writeTestDataToFile(fileSystem);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
FileChecksum origChecksum=fileSystem.getFileChecksum(TEST_PATH);
fileSystem.close();
miniCluster.shutdown();
// Restart the same cluster with encryption on, using RC4.
setEncryptionConfigKeys(clusterConf);
clusterConf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY,"rc4");
miniCluster=new MiniDFSCluster.Builder(clusterConf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fileSystem=getFileSystem(clusterConf);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fileSystem,TEST_PATH));
assertEquals(origChecksum,fileSystem.getFileChecksum(TEST_PATH));
fileSystem.close();
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies that a client which refuses to encrypt (spied shouldEncryptData()
// returns false) cannot read from an encryption-enabled cluster, and that
// the datanode logs the failed encryption handshake. When a trusted-channel
// resolver is configured (resolverClazz), unencrypted access is allowed and
// the failure expectations are skipped.
@Test public void testClientThatDoesNotSupportEncryption() throws IOException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new Configuration();
// Small retry window so the expected read failure happens quickly.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=getFileSystem(conf);
writeTestDataToFile(fs);
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
fs.close();
cluster.shutdown();
// Restart the same cluster with wire encryption enabled.
setEncryptionConfigKeys(conf);
cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
fs=getFileSystem(conf);
// Replace the DFS client with a spy that refuses to encrypt.
DFSClient client=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
DFSClient spyClient=Mockito.spy(client);
Mockito.doReturn(false).when(spyClient).shouldEncryptData();
DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyClient);
// Capture datanode logs to assert on the handshake failure message.
LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(DataNode.class));
try {
assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
// Only reached if the read succeeded; that is a failure unless a
// trusted-channel resolver permits unencrypted traffic.
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
fail("Should not have been able to read without encryption enabled.");
}
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Could not obtain block:",ioe);
}
finally {
logs.stopCapturing();
}
fs.close();
if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
GenericTestUtils.assertMatches(logs.getOutput(),"Failed to read expected encryption handshake from client at");
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test getEncryptionZoneForPath as a non super user.
 */
@Test(timeout=60000) public void testGetEZAsNonSuperUser() throws Exception {
final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"});
final Path testRoot=new Path(fsHelper.getTestRootDir());
// superPath: EZ readable only by the superuser (mode 0700).
final Path superPath=new Path(testRoot,"superuseronly");
final Path superPathFile=new Path(superPath,"file1");
// allPath: EZ accessible to everyone (mode 0777).
final Path allPath=new Path(testRoot,"accessall");
final Path allPathFile=new Path(allPath,"file1");
// nonEZDir: ordinary directory outside any encryption zone.
final Path nonEZDir=new Path(testRoot,"nonEZDir");
final Path nonEZFile=new Path(nonEZDir,"file1");
final int len=8192;
fsWrapper.mkdir(testRoot,new FsPermission((short)0777),true);
fsWrapper.mkdir(superPath,new FsPermission((short)0700),false);
fsWrapper.mkdir(allPath,new FsPermission((short)0777),false);
fsWrapper.mkdir(nonEZDir,new FsPermission((short)0777),false);
dfsAdmin.createEncryptionZone(superPath,TEST_KEY);
dfsAdmin.createEncryptionZone(allPath,TEST_KEY);
// Snapshot the root BEFORE the files are created and (later) deleted,
// so snapshot paths below keep resolving to the zone.
dfsAdmin.allowSnapshot(new Path("/"));
final Path newSnap=fs.createSnapshot(new Path("/"));
DFSTestUtil.createFile(fs,superPathFile,len,(short)1,0xFEED);
DFSTestUtil.createFile(fs,allPathFile,len,(short)1,0xFEED);
DFSTestUtil.createFile(fs,nonEZFile,len,(short)1,0xFEED);
user.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
final HdfsAdmin userAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
// A null path argument is rejected with NPE.
try {
userAdmin.getEncryptionZoneForPath(null);
fail("should have thrown NPE");
}
catch ( NullPointerException e) {
// Expected; nothing more to check.
}
// Accessible zone: both the zone dir and a file inside it resolve.
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPath).getPath().toString());
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString());
// A file inside the superuser-only zone must be denied.
try {
userAdmin.getEncryptionZoneForPath(superPathFile);
fail("expected AccessControlException");
}
catch ( AccessControlException e) {
assertExceptionContains("Permission denied:",e);
}
// Paths outside any zone resolve to null.
assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZDir));
assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZFile));
// Snapshot path of the zone resolves to the (live) zone path ...
String snapshottedAllPath=newSnap.toString() + allPath.toString();
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
// ... and keeps resolving even after the live file is deleted ...
fs.delete(allPathFile,false);
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
// ... and even after the live zone directory itself is deleted.
fs.delete(allPath,true);
assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString());
// Deleted live paths, however, no longer resolve to a zone.
assertNull("expected null for deleted file path",userAdmin.getEncryptionZoneForPath(allPathFile));
assertNull("expected null for deleted directory path",userAdmin.getEncryptionZoneForPath(allPath));
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Writes identical data inside and outside an encryption zone, verifies
 * the contents match, then rolls the zone key and checks that new files
 * get fresh EDEKs and key versions while remaining readable.
 */
@Test(timeout=120000) public void testReadWrite() throws Exception {
final HdfsAdmin admin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
final Path plainFile=new Path("/base");
final int fileLen=8192;
DFSTestUtil.createFile(fs,plainFile,fileLen,(short)1,0xFEED);
final Path zoneDir=new Path("/zone");
fs.mkdirs(zoneDir);
admin.createEncryptionZone(zoneDir,TEST_KEY);
// Encrypted file must decrypt back to the same bytes as the plain one.
final Path firstEncFile=new Path(zoneDir,"myfile");
DFSTestUtil.createFile(fs,firstEncFile,fileLen,(short)1,0xFEED);
verifyFilesEqual(fs,plainFile,firstEncFile,fileLen);
assertNumZones(1);
// Roll the zone key; existing files must remain readable.
String zoneKey=admin.listEncryptionZones().next().getKeyName();
cluster.getNamesystem().getProvider().rollNewVersion(zoneKey);
verifyFilesEqual(fs,plainFile,firstEncFile,fileLen);
// A file created after the roll gets a new EDEK and key version.
final Path secondEncFile=new Path(zoneDir,"myfile2");
DFSTestUtil.createFile(fs,secondEncFile,fileLen,(short)1,0xFEED);
FileEncryptionInfo firstInfo=getFileEncryptionInfo(firstEncFile);
FileEncryptionInfo secondInfo=getFileEncryptionInfo(secondEncFile);
assertFalse("EDEKs should be different",Arrays.equals(firstInfo.getEncryptedDataEncryptionKey(),secondInfo.getEncryptedDataEncryptionKey()));
assertNotEquals("Key was rolled, versions should be different",firstInfo.getEzKeyVersionName(),secondInfo.getEzKeyVersionName());
verifyFilesEqual(fs,firstEncFile,secondEncFile,fileLen);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a client can only create files in an encryption zone when
 * it advertises a cipher suite the NameNode understands, and that the
 * negotiated suite is recorded in the files' encryption info.
 */
@Test(timeout=60000) public void testCipherSuiteNegotiation() throws Exception {
final HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
final Path zone=new Path("/zone");
fs.mkdirs(zone);
dfsAdmin.createEncryptionZone(zone,TEST_KEY);
// Default client configuration succeeds.
DFSTestUtil.createFile(fs,new Path(zone,"success1"),0,(short)1,0xFEED);
// No suites advertised: create must fail.
setClientCipherSuites();
try {
DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
fail("Created a file without specifying a CipherSuite!");
}
catch ( UnknownCipherSuiteException e) {
assertExceptionContains("No cipher suites",e);
}
// Only unknown suites advertised: create must still fail.
setClientCipherSuites(CipherSuite.UNKNOWN,CipherSuite.UNKNOWN,CipherSuite.UNKNOWN);
try {
DFSTestUtil.createFile(fs,new Path(zone,"fail"),0,(short)1,0xFEED);
fail("Created a file without specifying a CipherSuite!");
}
catch ( UnknownCipherSuiteException e) {
assertExceptionContains("No cipher suites",e);
}
// One known suite anywhere in the list (first or last) is enough.
setClientCipherSuites(CipherSuite.AES_CTR_NOPADDING,CipherSuite.UNKNOWN,CipherSuite.UNKNOWN);
DFSTestUtil.createFile(fs,new Path(zone,"success2"),0,(short)1,0xFEED);
setClientCipherSuites(CipherSuite.UNKNOWN,CipherSuite.UNKNOWN,CipherSuite.AES_CTR_NOPADDING);
DFSTestUtil.createFile(fs,new Path(zone,"success3"),4096,(short)1,0xFEED);
cluster.getNamesystem().getProvider().flush();
// The NN must have created exactly one key (one per zone) with a
// single version.
KeyProvider provider=KeyProviderFactory.getProviders(conf).get(0);
List keys=provider.getKeys();
assertEquals("Expected NN to have created one key per zone",1,keys.size());
List allVersions=Lists.newArrayList();
for ( String key : keys) {
List versions=provider.getKeyVersions(key);
assertEquals("Should only have one key version per key",1,versions.size());
allVersions.addAll(versions);
}
// Both negotiated files must record the AES suite.
for (int i=2; i <= 3; i++) {
FileEncryptionInfo feInfo=getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
assertEquals(feInfo.getCipherSuite(),CipherSuite.AES_CTR_NOPADDING);
}
}

/** Replaces the DFS client's advertised cipher suites with the given list. */
private void setClientCipherSuites(CipherSuite... suites) {
fs.getClient().cipherSuites=Lists.newArrayListWithCapacity(suites.length);
for ( CipherSuite suite : suites) {
fs.getClient().cipherSuites.add(suite);
}
}
InternalCallVerifier BooleanVerifier
/**
 * This quite tricky test prevents acknowledgement packets from a datanode
 * This should block any write attempts after ackQueue is full.
 * Test is blocking, so the MiniDFSCluster has to be killed harshly.
 * @throws IOException in case of an error
 */
@Test public void pipeline_06() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
// ackQueue capacity; the writer must stall once this many packets are
// queued without acknowledgement.
final int MAX_PACKETS=80;
if (LOG.isDebugEnabled()) {
LOG.debug("Running " + METHOD_NAME);
}
// Suspend packet acknowledgements via the pipelines fault injector.
final PipelinesTestUtil.PipelinesTest pipst=(PipelinesTestUtil.PipelinesTest)PipelinesTestUtil.initTest();
pipst.setSuspend(true);
Path filePath=new Path("/" + METHOD_NAME + ".dat");
FSDataOutputStream fsOut=fs.create(filePath);
int cnt=0;
try {
// QueueChecker releases the suspension once MAX_PACKETS is reached.
QueueChecker cq=new QueueChecker(pipst,MAX_PACKETS);
cq.start();
int bytesToSend=700;
// Keep writing until either the checker lifts the suspension or the
// 100-iteration safety budget runs out.
while (cnt < 100 && pipst.getSuspend()) {
if (LOG.isDebugEnabled()) {
LOG.debug("_06(): " + cnt++ + " sending another "+ bytesToSend+ " bytes");
}
TestPipelines.writeData(fsOut,bytesToSend);
}
}
catch ( Exception e) {
// Best-effort: unexpected exceptions are logged, not failed on; the
// real assertion is the queued-packet bound below.
LOG.warn("Getting unexpected exception: ",e);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Last queued packet number " + pipst.getLastQueued());
}
// At most capacity + 1 packets (81) may have been queued while acks
// were suspended.
assertTrue("Shouldn't be able to send more than 81 packet",pipst.getLastQueued() <= 81);
}
InternalCallVerifier EqualityVerifier
/**
 * Tests appending after soft-limit expires.
 */
@Test public void testAppendAfterSoftLimit() throws IOException, InterruptedException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);
// 1 ms soft limit so the first writer's lease becomes preemptible almost
// immediately; huge hard limit so the NN never force-recovers it on its own.
final long softLimit=1L;
final long hardLimit=9999999L;
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.setLeasePeriod(softLimit,hardLimit);
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Second, independent client instance: appending from a different lease
// holder is what triggers soft-limit lease recovery of the open file.
FileSystem fs2=new DistributedFileSystem();
fs2.initialize(fs.getUri(),conf);
final Path testPath=new Path("/testAppendAfterSoftLimit");
final byte[] fileContents=AppendTestUtil.initBuffer(32);
// Write via the first client but do NOT close or hflush: the file stays
// under construction while the soft limit expires.
FSDataOutputStream out=fs.create(testPath);
out.write(fileContents);
// Sleep well past the 1 ms soft limit so fs2's append may preempt the lease.
Thread.sleep(250);
try {
FSDataOutputStream appendStream2=fs2.append(testPath);
appendStream2.write(fileContents);
appendStream2.close();
// Only fs2's 32 bytes are expected on disk — the first writer never
// flushed, so (presumably) its data was still client-buffered at
// recovery time. NOTE(review): confirm against lease-recovery semantics.
assertEquals(fileContents.length,fs.getFileStatus(testPath).getLen());
}
finally {
fs.close();
fs2.close();
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that copy on write for blocks works correctly
 * @throws IOException an exception might be thrown
 */
@Test public void testCopyOnWrite() throws IOException {
Configuration conf=new HdfsConfiguration();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
try {
Path file1=new Path("/filestatus.dat");
FSDataOutputStream stm=AppendTestUtil.createFile(fs,file1,1);
writeFile(stm);
stm.close();
DataNode[] dn=cluster.listDataNodes();
assertTrue("There should be only one datanode but found " + dn.length,dn.length == 1);
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
List blocks=locations.getLocatedBlocks();
// Hard-link every other block file so those replicas appear shared and
// unlinkBlock() has real copy-on-write work to do.
for (int i=0; i < blocks.size(); i=i + 2) {
ExtendedBlock b=blocks.get(i).getBlock();
final File f=DataNodeTestUtils.getFile(dn[0],b.getBlockPoolId(),b.getLocalBlock().getBlockId());
File link=new File(f.toString() + ".link");
System.out.println("Creating hardlink for File " + f + " to "+ link);
HardLink.createHardLink(f,link);
}
// First detach pass: every block reports true (a private copy was made,
// or the replica was already detachable).
for (int i=0; i < blocks.size(); i++) {
ExtendedBlock b=blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned true",DataNodeTestUtils.unlinkBlock(dn[0],b,1));
}
// Second detach pass: every block must now report false, since the
// previous pass already detached each replica.
for (int i=0; i < blocks.size(); i++) {
ExtendedBlock b=blocks.get(i).getBlock();
System.out.println("testCopyOnWrite detaching block " + b);
assertTrue("Detaching block " + b + " should have returned false",!DataNodeTestUtils.unlinkBlock(dn[0],b,1));
}
}
finally {
client.close();
fs.close();
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Creates one file, writes a few bytes to it and then closed it.
 * Reopens the same file for appending, write all blocks and then close.
 * Verify that all data exists in file.
 * @throws IOException an exception might be thrown
 */
@Test public void testSimpleAppend() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50);
  fileContents=AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=cluster.getFileSystem();
  try {
    {
      // Part 1: write the file in three chunks (create + two appends) at
      // deliberately non-aligned offsets, then verify the full contents.
      Path file1=new Path("/simpleAppend.dat");
      FSDataOutputStream stm=AppendTestUtil.createFile(fs,file1,1);
      System.out.println("Created file simpleAppend.dat");
      int mid=186;
      System.out.println("Writing " + mid + " bytes to file "+ file1);
      stm.write(fileContents,0,mid);
      stm.close();
      System.out.println("Wrote and Closed first part of file.");
      int mid2=607;
      // BUG FIX: this message used to report "mid" bytes although the
      // second chunk is (mid2 - mid) bytes long.
      System.out.println("Writing " + (mid2 - mid) + " bytes to file "+ file1);
      stm=fs.append(file1);
      stm.write(fileContents,mid,mid2 - mid);
      stm.close();
      System.out.println("Wrote and Closed second part of file.");
      stm=fs.append(file1);
      // After re-opening for append the stream position must reflect the
      // data already in the file.
      assertTrue(stm.getPos() > 0);
      System.out.println("Writing " + (AppendTestUtil.FILE_SIZE - mid2) + " bytes to file "+ file1);
      stm.write(fileContents,mid2,AppendTestUtil.FILE_SIZE - mid2);
      // BUG FIX: the following two messages used to say "second part"
      // although this is the third chunk.
      System.out.println("Written third part of file");
      stm.close();
      System.out.println("Wrote and Closed third part of file.");
      AppendTestUtil.checkFullFile(fs,file1,AppendTestUtil.FILE_SIZE,fileContents,"Read 2");
    }
    {
      // Part 2: appending to a non-existent file must fail with FNFE.
      FSDataOutputStream out=null;
      try {
        out=fs.append(new Path("/non-existing.dat"));
        fail("Expected to have FileNotFoundException");
      }
      catch ( java.io.FileNotFoundException fnfe) {
        System.out.println("Good: got " + fnfe);
        fnfe.printStackTrace(System.out);
      }
      finally {
        IOUtils.closeStream(out);
      }
    }
    {
      // Part 3: permission checks for append as a non-superuser.
      Path root=new Path("/");
      fs.setPermission(root,new FsPermission((short)0777));
      fs.close();
      final UserGroupInformation superuser=UserGroupInformation.getCurrentUser();
      String username="testappenduser";
      String group="testappendgroup";
      // Sanity: the test user must differ from the current superuser.
      assertFalse(superuser.getShortUserName().equals(username));
      assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
      UserGroupInformation appenduser=UserGroupInformation.createUserForTesting(username,new String[]{group});
      fs=DFSTestUtil.getFileSystemAs(appenduser,conf);
      Path dir=new Path(root,getClass().getSimpleName());
      Path foo=new Path(dir,"foo.dat");
      FSDataOutputStream out=null;
      int offset=0;
      try {
        // Create the file as the test user.
        out=fs.create(foo);
        int len=10 + AppendTestUtil.nextInt(100);
        out.write(fileContents,offset,len);
        offset+=len;
      }
      finally {
        IOUtils.closeStream(out);
      }
      // Append succeeds: owner has write permission on the file (0200);
      // directory permission (0100, execute-only) still allows traversal.
      fs.setPermission(dir,new FsPermission((short)0100));
      fs.setPermission(foo,new FsPermission((short)0200));
      out=null;
      try {
        out=fs.append(foo);
        int len=10 + AppendTestUtil.nextInt(100);
        out.write(fileContents,offset,len);
        offset+=len;
      }
      finally {
        IOUtils.closeStream(out);
      }
      // Append fails: 0577 removes the owner's write bit on the file.
      fs.setPermission(foo,new FsPermission((short)0577));
      fs.setPermission(dir,new FsPermission((short)0777));
      out=null;
      try {
        out=fs.append(foo);
        fail("Expected to have AccessControlException");
      }
      catch ( AccessControlException ace) {
        System.out.println("Good: got " + ace);
        ace.printStackTrace(System.out);
      }
      finally {
        IOUtils.closeStream(out);
      }
    }
  }
  catch ( IOException e) {
    System.out.println("Exception :" + e);
    throw e;
  }
  catch ( Throwable e) {
    System.out.println("Throwable :" + e);
    e.printStackTrace();
    throw new IOException("Throwable : " + e);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * TC11: Racing rename
 * @throws IOException an exception might be thrown
 */
@Test public void testTC11() throws Exception {
final Path p=new Path("/TC11/foo");
System.out.println("p=" + p);
final int len1=(int)BLOCK_SIZE;
{
// Write exactly one full block and close.
FSDataOutputStream out=fs.create(p,false,buffersize,REPLICATION,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
// Re-open for append, write half a block, hflush — then rename the file
// WHILE the append stream is still open (the "racing" part of TC11).
FSDataOutputStream out=fs.append(p);
final int len2=(int)BLOCK_SIZE / 2;
AppendTestUtil.write(out,len1,len2);
out.hflush();
final Path pnew=new Path(p + ".new");
assertTrue(fs.rename(p,pnew));
// Closing after the rename must still finalize the block correctly.
out.close();
final long len=fs.getFileStatus(pnew).getLen();
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(pnew.toString(),0L,len);
final int numblock=locatedblocks.locatedBlockCount();
for (int i=0; i < numblock; i++) {
final LocatedBlock lb=locatedblocks.get(i);
final ExtendedBlock blk=lb.getBlock();
final long size=lb.getBlockSize();
// Every block except the last must be a full block.
if (i < numblock - 1) {
assertEquals(BLOCK_SIZE,size);
}
// Each replica's on-datanode metadata must agree with the NN's size.
for ( DatanodeInfo datanodeinfo : lb.getLocations()) {
final DataNode dn=cluster.getDataNode(datanodeinfo.getIpcPort());
final Block metainfo=DataNodeTestUtils.getFSDataset(dn).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
assertEquals(size,metainfo.getNumBytes());
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Appends repeatedly to a file whose length is not aligned to a CRC chunk,
 * so the first write after each reopen lands inside a partial CRC chunk
 * without filling it up.
 * @throws IOException
 */
@Test public void testAppendToPartialChunk() throws IOException {
  final Path p=new Path("/partialChunk/foo");
  final int fileLen=513;
  System.out.println("p=" + p);
  byte[] contents=AppendTestUtil.initBuffer(fileLen);
  // Create the file holding a single byte.
  FSDataOutputStream out=AppendTestUtil.createFile(fs,p,1);
  out.write(contents,0,1);
  out.close();
  System.out.println("Wrote 1 byte and closed the file " + p);
  // First append: one byte, hflush, close.
  out=fs.append(p);
  out.write(contents,1,1);
  out.hflush();
  out.close();
  System.out.println("Append 1 byte and closed the file " + p);
  // Second append: position must resume at byte 2.
  out=fs.append(p);
  assertEquals(2,out.getPos());
  out.write(contents,2,1);
  out.hflush();
  System.out.println("Append and flush 1 byte");
  out.write(contents,3,2);
  out.hflush();
  System.out.println("Append and flush 2 byte");
  // Write the remainder of the buffer and close.
  out.write(contents,5,fileLen - 5);
  out.close();
  System.out.println("Flush 508 byte and closed the file " + p);
  // The reassembled file must match the original buffer byte for byte.
  AppendTestUtil.checkFullFile(fs,p,fileLen,contents,"Failed to append to a partial chunk");
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * TC7: Corrupted replicas are present.
 * @throws IOException an exception might be thrown
 */
@Test public void testTC7() throws Exception {
final short repl=2;
final Path p=new Path("/TC7/foo");
System.out.println("p=" + p);
final int len1=(int)(BLOCK_SIZE / 2);
{
// Write half a block at replication 2 and close.
FSDataOutputStream out=fs.create(p,false,buffersize,repl,BLOCK_SIZE);
AppendTestUtil.write(out,0,len1);
out.close();
}
DFSTestUtil.waitReplication(fs,p,repl);
final LocatedBlocks locatedblocks=fs.dfs.getNamenode().getBlockLocations(p.toString(),0L,len1);
assertEquals(1,locatedblocks.locatedBlockCount());
final LocatedBlock lb=locatedblocks.get(0);
final ExtendedBlock blk=lb.getBlock();
assertEquals(len1,lb.getBlockSize());
DatanodeInfo[] datanodeinfos=lb.getLocations();
assertEquals(repl,datanodeinfos.length);
// Corrupt one of the two replicas by truncating its block file on disk
// to zero bytes.
final DataNode dn=cluster.getDataNode(datanodeinfos[0].getIpcPort());
final File f=DataNodeTestUtils.getBlockFile(dn,blk.getBlockPoolId(),blk.getLocalBlock());
final RandomAccessFile raf=new RandomAccessFile(f,"rw");
AppendTestUtil.LOG.info("dn=" + dn + ", blk="+ blk+ " (length="+ blk.getNumBytes()+ ")");
assertEquals(len1,raf.length());
raf.setLength(0);
raf.close();
final int len2=(int)BLOCK_SIZE;
{
// Append must still succeed using the remaining good replica.
FSDataOutputStream out=fs.append(p);
AppendTestUtil.write(out,len1,len2);
out.close();
}
// Verify the whole file is readable despite the corrupted replica.
AppendTestUtil.check(fs,p,len1 + len2);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout=60000) public void testAppendInsufficientLocations() throws Exception {
Configuration conf=new Configuration();
// Fast heartbeats / recheck so dead datanodes are noticed quickly.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,3000);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
DistributedFileSystem fileSystem=null;
try {
fileSystem=cluster.getFileSystem();
Path f=new Path("/testAppend");
FSDataOutputStream create=fileSystem.create(f,(short)2);
create.write("/testAppend".getBytes());
create.close();
DFSTestUtil.waitReplication(fileSystem,f,(short)2);
LocatedBlocks lbs=fileSystem.dfs.getNamenode().getBlockLocations("/testAppend",0,Long.MAX_VALUE);
List dnsOfCluster=cluster.getDataNodes();
DatanodeInfo[] dnsWithLocations=lbs.getLastLocatedBlock().getLocations();
// Shut down exactly the datanodes that hold replicas of the last block,
// leaving no live location for the append pipeline.
for ( DataNode dn : dnsOfCluster) {
for ( DatanodeInfo loc : dnsWithLocations) {
if (dn.getDatanodeId().equals(loc)) {
dn.shutdown();
DFSTestUtil.waitForDatanodeDeath(dn);
}
}
}
DFSTestUtil.waitReplication(fileSystem,f,(short)0);
try {
fileSystem.append(f);
fail("Append should fail because insufficient locations");
}
catch ( IOException e) {
LOG.info("Expected exception: ",e);
}
FSDirectory dir=cluster.getNamesystem().getFSDirectory();
final INodeFile inode=INodeFile.valueOf(dir.getINode("/testAppend"),"/testAppend");
// The failed append must not leave the file under construction.
assertTrue("File should remain closed",!inode.isUnderConstruction());
}
finally {
if (null != fileSystem) {
fileSystem.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-2991. Creates and appends to files
 * where blocks start/end on block boundaries.
 */
@Test public void testAppendRestart() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
MiniDFSCluster cluster=null;
FSDataOutputStream stream=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
FileSystem fs=cluster.getFileSystem();
// The in-progress edit log file whose op counts are inspected below.
File editLog=new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster,0).get(0),NNStorage.getInProgressEditsFileName(1));
EnumMap> counts;
// Case 1: write+append aligned exactly on block boundaries.
// Expect one OP_ADD / OP_ADD_BLOCK / OP_CLOSE per write session.
Path p1=new Path("/block-boundaries");
writeAndAppend(fs,p1,BLOCK_SIZE,BLOCK_SIZE);
counts=FSImageTestUtil.countEditLogOpTypes(editLog);
assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
// Case 2: append starting mid-block — the append to a partial last
// block additionally logs one OP_UPDATE_BLOCKS. Counts below are
// cumulative (they include case 1's ops in the same edit log).
Path p2=new Path("/not-block-boundaries");
writeAndAppend(fs,p2,BLOCK_SIZE / 2,BLOCK_SIZE);
counts=FSImageTestUtil.countEditLogOpTypes(editLog);
assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_ADD).held);
assertEquals(1,(int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_CLOSE).held);
// Restarting the NN replays the edit log; both files must survive.
cluster.restartNameNode();
AppendTestUtil.check(fs,p1,2 * BLOCK_SIZE);
AppendTestUtil.check(fs,p2,3 * BLOCK_SIZE / 2);
}
finally {
IOUtils.closeStream(stream);
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading an image created
 * using a namesystem image created with 0.23.1-rc2 exhibiting
 * the issue.
 */
@Test public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  String tarFile=System.getProperty("test.cache.data","build/test/cache") + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir=PathUtils.getTestDirName(getClass());
  File dfsDir=new File(testDir,"image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  // Unpack the canned 0.23.1-rc2 image that exhibits the missing OP_ADD.
  FileUtil.unTar(new File(tarFile),new File(testDir));
  File nameDir=new File(dfsDir,"name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameDir.getAbsolutePath());
  // Start NN-only (no datanodes) on the unpacked image in UPGRADE mode.
  // FIX: the builder chain previously called numDataNodes(0) twice;
  // the redundant duplicate call has been removed.
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).waitSafeMode(false).startupOption(StartupOption.UPGRADE).build();
  try {
    FileSystem fs=cluster.getFileSystem();
    Path testPath=new Path("/tmp/io_data/test_io_0");
    // The file captured in the canned image is exactly 2 MB long.
    assertEquals(2 * 1024 * 1024,fs.getFileStatus(testPath).getLen());
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the case that a replica is reported corrupt while it is not
 * in blocksMap. Make sure that ArrayIndexOutOfBounds does not thrown.
 * See Hadoop-4351.
 */
@Test public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new HdfsConfiguration();
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs=cluster.getFileSystem();
    final Path FILE_PATH=new Path("/tmp.txt");
    final long FILE_LEN=1L;
    DFSTestUtil.createFile(fs,FILE_PATH,FILE_LEN,(short)2,1L);
    // Locate the block's file on disk; it may live in either of the two
    // storage directories of datanode 0.
    final String bpid=cluster.getNamesystem().getBlockPoolId();
    File storageDir=cluster.getInstanceStorageDir(0,0);
    File dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
    assertTrue("Data directory does not exist",dataDir.exists());
    ExtendedBlock blk=getBlock(bpid,dataDir);
    if (blk == null) {
      storageDir=cluster.getInstanceStorageDir(0,1);
      dataDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
      blk=getBlock(bpid,dataDir);
    }
    assertFalse("Data directory does not contain any blocks or there was an " + "IO error",blk == null);
    // Start a third datanode that holds NO replica of the block.
    cluster.startDataNodes(conf,1,true,null,null);
    ArrayList datanodes=cluster.getDataNodes();
    // FIX: expected/actual were swapped — JUnit's assertEquals takes the
    // expected value first.
    assertEquals(3,datanodes.size());
    DataNode dataNode=datanodes.get(2);
    DatanodeRegistration dnR=DataNodeTestUtils.getDNRegistrationForBP(dataNode,blk.getBlockPoolId());
    FSNamesystem ns=cluster.getNamesystem();
    ns.writeLock();
    try {
      // Report the block corrupt from the replica-less datanode; this is
      // the HADOOP-4351 scenario that used to throw AIOOBE.
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,new DatanodeInfo(dnR),"TEST","STORAGE_ID");
    }
    finally {
      ns.writeUnlock();
    }
    // The namesystem must still be usable afterwards.
    fs.open(FILE_PATH);
    fs.delete(FILE_PATH,false);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that DFS still serves intact data when every block replica on
 * one of three datanodes has been deleted out from under it.
 */
@Test public void testFileCorruption() throws Exception {
  MiniDFSCluster cluster=null;
  DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFileCorruption").setNumFiles(20).build();
  try {
    Configuration conf=new HdfsConfiguration();
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs=cluster.getFileSystem();
    util.createFiles(fs,"/srcdat");
    // Find the finalized-block directory of the third datanode.
    File storageDir=cluster.getInstanceStorageDir(2,0);
    String bpid=cluster.getNamesystem().getBlockPoolId();
    File finalizedDir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
    assertTrue("data directory does not exist",finalizedDir.exists());
    File[] blockFiles=finalizedDir.listFiles();
    assertTrue("Blocks do not exist in data-dir",(blockFiles != null) && (blockFiles.length > 0));
    // Remove every block file on that datanode (skip non-block entries
    // such as metadata files).
    for ( File blockFile : blockFiles) {
      if (!blockFile.getName().startsWith("blk_")) {
        continue;
      }
      System.out.println("Deliberately removing file " + blockFile.getName());
      assertTrue("Cannot remove file.",blockFile.delete());
    }
    // The replicas on the other two datanodes must still yield all data.
    assertTrue("Corrupted replicas not handled properly.",util.checkFiles(fs,"/srcdat"));
    util.cleanup(fs,"/srcdat");
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Test that file data does not become corrupted even in the face of errors.
 */
@Test public void testFileCreationError1() throws IOException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
try {
Path file1=new Path("/filestatus.dat");
FSDataOutputStream stm=createFile(fs,file1,1);
assertTrue(file1 + " should be a file",fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
// Kill all datanodes while the output stream is still open, then poll
// until the NN reports no live datanodes.
cluster.shutdownDataNodes();
while (true) {
DatanodeInfo[] info=client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
if (info.length == 0) {
break;
}
System.out.println("testFileCreationError1: waiting for datanode " + " to die.");
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
// Writing/closing with no datanodes is expected to fail.
byte[] buffer=AppendTestUtil.randomBytes(seed,1);
try {
stm.write(buffer);
stm.close();
}
catch ( Exception e) {
System.out.println("Encountered expected exception");
}
// The NN must have cleaned up any blocks allocated for the failed file.
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up",locations.locatedBlockCount() == 0);
}
finally {
cluster.shutdown();
client.close();
}
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test creating two files at the same time.
 */
@Test public void testConcurrentFileCreation() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs=cluster.getFileSystem();
    Path[] p={new Path("/foo"),new Path("/bar")};
    // Two output streams open simultaneously.
    FSDataOutputStream[] out={fs.create(p[0]),fs.create(p[1])};
    int i=0;
    for (; i < 100; i++) {
      out[0].write(i);
      out[1].write(i);
    }
    out[0].close();
    // Keep writing to the second file after the first is closed.
    for (; i < 200; i++) {
      out[1].write(i);
    }
    out[1].close();
    // Read both files back and verify every byte.
    FSDataInputStream[] in={fs.open(p[0]),fs.open(p[1])};
    try {
      for (i=0; i < 100; i++) {
        assertEquals(i,in[0].read());
      }
      for (i=0; i < 200; i++) {
        assertEquals(i,in[1].read());
      }
    }
    finally {
      // FIX: the input streams were previously never closed (leak).
      IOUtils.closeStream(in[0]);
      IOUtils.closeStream(in[1]);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Create a file, write something, hflush but not close.
 * Then change lease period and wait for lease recovery.
 * Finally, read the block directly from each Datanode and verify the content.
 */
@Test public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
final long leasePeriod=1000;
final int DATANODE_NUM=3;
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs=null;
try {
cluster.waitActive();
dfs=cluster.getFileSystem();
final String f=DIR + "foo";
final Path fpath=new Path(f);
// Write and hflush, but deliberately never close — the file stays
// under construction until lease recovery kicks in below.
HdfsDataOutputStream out=create(dfs,fpath,DATANODE_NUM);
out.write("something".getBytes());
out.hflush();
int actualRepl=out.getCurrentBlockReplication();
assertTrue(f + " should be replicated to " + DATANODE_NUM+ " datanodes.",actualRepl == DATANODE_NUM);
// Shrink both lease limits to 1s and wait 5x that for the NN to
// expire the lease and recover/finalize the block.
cluster.setLeasePeriod(leasePeriod,leasePeriod);
try {
Thread.sleep(5 * leasePeriod);
}
catch ( InterruptedException e) {
}
LocatedBlocks locations=dfs.dfs.getNamenode().getBlockLocations(f,0,Long.MAX_VALUE);
assertEquals(1,locations.locatedBlockCount());
LocatedBlock locatedblock=locations.getLocatedBlocks().get(0);
int successcount=0;
// Read each replica's block file straight off the datanode's disk and
// check it contains exactly the hflushed content.
for ( DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
DataNode datanode=cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk=locatedblock.getBlock();
Block b=DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(blk.getBlockPoolId(),blk.getBlockId());
final File blockfile=DataNodeTestUtils.getFile(datanode,blk.getBlockPoolId(),b.getBlockId());
System.out.println("blockfile=" + blockfile);
if (blockfile != null) {
BufferedReader in=new BufferedReader(new FileReader(blockfile));
assertEquals("something",in.readLine());
in.close();
successcount++;
}
}
System.out.println("successcount=" + successcount);
// At least one replica must have been finalized with the data.
assertTrue(successcount > 0);
}
finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
System.out.println("testLeaseExpireHardLimit successful");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test that file leases are persisted across namenode restarts.
 */
@Test public void testFileCreationNamenodeRestart() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem fs=null;
try {
cluster.waitActive();
fs=cluster.getFileSystem();
final int nnport=cluster.getNameNodePort();
// Open several files and KEEP THEM OPEN across the NN restarts below;
// their leases must survive in the persisted namesystem state.
Path file1=new Path("/filestatus.dat");
HdfsDataOutputStream stm=create(fs,file1,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1);
assertEquals(file1 + " should be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
writeFile(stm,numBlocks * blockSize);
stm.hflush();
assertEquals(file1 + " should still be replicated to 1 datanode.",1,stm.getCurrentBlockReplication());
// Rename the open file; the lease must follow the new path.
Path fileRenamed=new Path("/filestatusRenamed.dat");
fs.rename(file1,fileRenamed);
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to "+ fileRenamed);
file1=fileRenamed;
Path file2=new Path("/filestatus2.dat");
FSDataOutputStream stm2=createFile(fs,file2,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2);
Path file3=new Path("/user/home/fullpath.dat");
FSDataOutputStream stm3=createFile(fs,file3,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3);
Path file4=new Path("/user/home/fullpath4.dat");
FSDataOutputStream stm4=createFile(fs,file4,1);
System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4);
// Rename the parent directory of two open files; their leases must
// track the moved paths too.
fs.mkdirs(new Path("/bin"));
fs.rename(new Path("/user/home"),new Path("/bin"));
Path file3new=new Path("/bin/home/fullpath.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file3 + " to "+ file3new);
Path file4new=new Path("/bin/home/fullpath4.dat");
System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to "+ file4new);
// Restart the NN twice (sleeping past the IPC idle time so client
// connections drop) while the streams above remain open.
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
cluster.shutdown();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fs=cluster.getFileSystem();
// Point the still-open client streams at the post-rename paths so
// their lease renewals/completions address the right files.
DFSOutputStream dfstream=(DFSOutputStream)(stm.getWrappedStream());
dfstream.setTestFilename(file1.toString());
dfstream=(DFSOutputStream)(stm3.getWrappedStream());
dfstream.setTestFilename(file3new.toString());
dfstream=(DFSOutputStream)(stm4.getWrappedStream());
dfstream.setTestFilename(file4new.toString());
// Writes and closes must still succeed against the restarted NN.
byte[] buffer=AppendTestUtil.randomBytes(seed,1);
stm.write(buffer);
stm.close();
stm2.write(buffer);
stm2.close();
stm3.close();
stm4.close();
// Verify the block counts the NN reports after everything is closed.
DFSClient client=fs.dfs;
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file1,locations.locatedBlockCount() == 3);
locations=client.getNamenode().getBlockLocations(file2.toString(),0,Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file2,locations.locatedBlockCount() == 1);
}
finally {
IOUtils.closeStream(fs);
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test public void testFileCreationError2() throws IOException {
long leasePeriod=1000;
System.out.println("testFileCreationError2 start");
Configuration conf=new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1);
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
DistributedFileSystem dfs=null;
try {
cluster.waitActive();
dfs=cluster.getFileSystem();
DFSClient client=dfs.dfs;
Path file1=new Path("/filestatus.dat");
createFile(dfs,file1,1);
System.out.println("testFileCreationError2: " + "Created file filestatus.dat with one replicas.");
LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("testFileCreationError2: " + "The file has " + locations.locatedBlockCount() + " blocks.");
// Allocate an extra block directly via the NN protocol without ever
// writing to it — this is the block lease recovery should remove.
LocatedBlock location=client.getNamenode().addBlock(file1.toString(),client.clientName,null,null,INodeId.GRANDFATHER_INODE_ID,null);
System.out.println("testFileCreationError2: " + "Added block " + location.getBlock());
locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
int count=locations.locatedBlockCount();
System.out.println("testFileCreationError2: " + "The file now has " + count + " blocks.");
// Shrink the lease limits to 1s and wait 5x that for expiry + recovery.
cluster.setLeasePeriod(leasePeriod,leasePeriod);
try {
Thread.sleep(5 * leasePeriod);
}
catch ( InterruptedException e) {
}
// After lease recovery the never-written last block must be gone.
locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
System.out.println("testFileCreationError2: " + "locations = " + locations.locatedBlockCount());
assertEquals(0,locations.locatedBlockCount());
System.out.println("testFileCreationError2 successful");
}
finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test deleteOnExit
 */
@Test public void testDeleteOnExit() throws IOException {
  Configuration conf=new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=cluster.getFileSystem();
  FileSystem localfs=FileSystem.getLocal(conf);
  try {
    // Two files on DFS, one on the local filesystem.
    Path file1=new Path("filestatus.dat");
    Path file2=new Path("filestatus2.dat");
    Path file3=new Path("filestatus3.dat");
    FSDataOutputStream stm1=createFile(fs,file1,1);
    FSDataOutputStream stm2=createFile(fs,file2,1);
    FSDataOutputStream stm3=createFile(localfs,file3,1);
    System.out.println("DeleteOnExit: Created files.");
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();
    // Mark all three for delete-on-exit, then close both filesystems,
    // which triggers the deletions.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);
    fs.close();
    localfs.close();
    fs=null;
    localfs=null;
    // Re-open fresh filesystem instances and verify the files are gone.
    fs=cluster.getFileSystem();
    localfs=FileSystem.getLocal(conf);
    // FIX: corrected the typos "inspite of deletOnExit" in the three
    // assertion messages below.
    assertTrue(file1 + " still exists in spite of deleteOnExit set.",!fs.exists(file1));
    assertTrue(file2 + " still exists in spite of deleteOnExit set.",!fs.exists(file2));
    assertTrue(file3 + " still exists in spite of deleteOnExit set.",!localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");
  }
  finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test creating a file whose data gets sync when closed
 */
@Test public void testFileCreationSyncOnClose() throws IOException {
  Configuration conf=new HdfsConfiguration();
  // Datanodes fsync block files on close in this configuration.
  conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,true);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs=cluster.getFileSystem();
    Path[] p={new Path("/foo"),new Path("/bar")};
    FSDataOutputStream[] out={fs.create(p[0]),fs.create(p[1])};
    int i=0;
    for (; i < 100; i++) {
      out[0].write(i);
      out[1].write(i);
    }
    out[0].close();
    // Keep writing to the second file after the first is closed.
    for (; i < 200; i++) {
      out[1].write(i);
    }
    out[1].close();
    // Read both files back and verify every byte.
    FSDataInputStream[] in={fs.open(p[0]),fs.open(p[1])};
    try {
      for (i=0; i < 100; i++) {
        assertEquals(i,in[0].read());
      }
      for (i=0; i < 200; i++) {
        assertEquals(i,in[1].read());
      }
    }
    finally {
      // FIX: the input streams were previously never closed (leak).
      IOUtils.closeStream(in[0]);
      IOUtils.closeStream(in[1]);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that the server-side defaults configured on the namenode are
 * the ones handed back to clients via getServerDefaults().
 */
@Test public void testServerDefaults() throws IOException {
  Configuration conf=new HdfsConfiguration();
  // Configure explicit server-side values; replication is bumped by one
  // so the test can tell the server value apart from the stock default.
  conf.setLong(DFS_BLOCK_SIZE_KEY,DFS_BLOCK_SIZE_DEFAULT);
  conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY,DFS_BYTES_PER_CHECKSUM_DEFAULT);
  conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
  conf.setInt(DFS_REPLICATION_KEY,DFS_REPLICATION_DEFAULT + 1);
  conf.setInt(IO_FILE_BUFFER_SIZE_KEY,IO_FILE_BUFFER_SIZE_DEFAULT);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1).build();
  cluster.waitActive();
  FileSystem fs=cluster.getFileSystem();
  try {
    // Every advertised default must echo the configured server value.
    FsServerDefaults defaults=fs.getServerDefaults();
    assertEquals(DFS_BLOCK_SIZE_DEFAULT,defaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT,defaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT,defaults.getWritePacketSize());
    assertEquals(DFS_REPLICATION_DEFAULT + 1,defaults.getReplication());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT,defaults.getFileBufferSize());
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test lease recovery triggered by the DFSClient: start several slow
 * writer threads, kill one of the REPLICATION datanodes while they are
 * writing, then stop the writers and verify every file is fully readable
 * with the expected sequential byte content.
 */
@Test public void testClientTriggeredLeaseRecovery() throws Exception {
final int REPLICATION=3;
Configuration conf=new HdfsConfiguration();
// A single datanode RPC handler keeps request ordering simple.
conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,REPLICATION);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
try {
final FileSystem fs=cluster.getFileSystem();
final Path dir=new Path("/wrwelkj");
// Ten writer threads, each appending to its own file under dir.
SlowWriter[] slowwriters=new SlowWriter[10];
for (int i=0; i < slowwriters.length; i++) {
slowwriters[i]=new SlowWriter(fs,new Path(dir,"file" + i));
}
try {
for (int i=0; i < slowwriters.length; i++) {
slowwriters[i].start();
}
// Let the writers make some progress before failing a datanode.
Thread.sleep(1000);
// Kill one random datanode out of the REPLICATION replicas.
cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
System.out.println("Wait a few seconds");
Thread.sleep(5000);
}
finally {
// Stop and join every writer, even if the body above threw.
for (int i=0; i < slowwriters.length; i++) {
if (slowwriters[i] != null) {
slowwriters[i].running=false;
slowwriters[i].interrupt();
}
}
for (int i=0; i < slowwriters.length; i++) {
if (slowwriters[i] != null) {
slowwriters[i].join();
}
}
}
System.out.println("Verify the file");
// Each file must contain consecutive byte values 0,1,2,... as asserted
// below (SlowWriter presumably writes that sequence -- confirmed by the
// assertEquals on every byte read).
for (int i=0; i < slowwriters.length; i++) {
System.out.println(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen());
FSDataInputStream in=null;
try {
in=fs.open(slowwriters[i].filepath);
for (int j=0, x; (x=in.read()) != -1; j++) {
assertEquals(j,x);
}
}
finally {
IOUtils.closeStream(in);
}
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Create /foo/file1 and /file2 (both left open after an hflush), delete
 * /foo while file1 is still open, then restart the cluster twice without
 * reformatting and verify the namespace survived: file1 is gone with its
 * parent, /file2 still exists.
 */
@Test public void testFileCreationDeleteParent() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final int MAX_IDLE_TIME = 2000; // msec
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final int nnport = cluster.getNameNodePort();
    // Create /foo/file1 and keep it open (hflush, no close).
    Path dir = new Path("/foo");
    Path file1 = new Path(dir, "file1");
    FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1, 1000);
    stm1.hflush();
    // Create /file2, also left open after an hflush.
    Path file2 = new Path("/file2");
    FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2, 1000);
    stm2.hflush();
    // Delete the parent of the still-open file1, then bounce the cluster.
    fs.delete(dir, true);
    cluster.shutdown();
    try {
      // Let the IPC client connections idle out before restarting.
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
      // ignored: the sleep is best-effort
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    // Restart a second time to make sure the edit log replays cleanly.
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      // ignored: the sleep is best-effort
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // file1 went away with its parent; file2 must have survived.
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(file2));
  } finally {
    // BUGFIX: guard against an NPE when an early failure (e.g. in
    // waitActive) leaves fs null.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the file length visible to a client when the file was hsync'ed and
 * the cluster restarts before any datanode re-registers with the NameNode:
 * the synced length must survive a normal restart, and opening the file
 * must be rejected while the NameNode is still in safe mode.
 */
@Test(timeout=60000) public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Small block size so the 1030-byte write spans more than one block.
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
HdfsDataInputStream in=null;
try {
Path path=new Path("/tmp/TestFileLengthOnClusterRestart","test");
DistributedFileSystem dfs=cluster.getFileSystem();
FSDataOutputStream out=dfs.create(path);
int fileLength=1030;
out.write(new byte[fileLength]);
// hsync persists the length on the NameNode without closing the file.
out.hsync();
cluster.restartNameNode();
cluster.waitActive();
// After a normal restart the synced length must be visible to readers.
in=(HdfsDataInputStream)dfs.open(path,1024);
Assert.assertEquals(fileLength,in.getVisibleLength());
// Restart the NameNode with all datanodes down and unregistered; it must
// stay in safe mode and refuse to serve the file.
cluster.shutdownDataNodes();
cluster.restartNameNode(false);
verifyNNIsInSafeMode(dfs);
try {
// NOTE(review): this reassigns 'in' without closing the previous stream;
// if this open unexpectedly succeeds, the first stream leaks.
in=(HdfsDataInputStream)dfs.open(path);
Assert.fail("Expected IOException");
}
catch ( IOException e) {
// Expected: reads are rejected while the NameNode is in safe mode.
Assert.assertTrue(e.getLocalizedMessage().indexOf("Name node is in safe mode") >= 0);
}
}
finally {
if (null != in) {
in.close();
}
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the FileStatus obtained by calling listStatus on a plain file:
 * both the FileSystem and FileContext APIs must report the same, correct
 * status for the single entry.
 */
@Test public void testListStatusOnFile() throws IOException {
  // listStatus on a file returns exactly one entry: the file itself.
  FileStatus[] stats = fs.listStatus(file1);
  assertEquals(1, stats.length);
  FileStatus status = stats[0];
  assertFalse(file1 + " should be a file", status.isDirectory());
  assertEquals(blockSize, status.getBlockSize());
  assertEquals(1, status.getReplication());
  assertEquals(fileSize, status.getLen());
  assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(),
      status.getPath().toString());
  // The FileContext view must agree with the FileSystem view.
  // BUGFIX: use the parameterized iterator type instead of the raw type
  // (raw RemoteIterator.next() returns Object, which does not assign to
  // FileStatus).
  RemoteIterator<FileStatus> itor = fc.listStatus(file1);
  status = itor.next();
  assertEquals(stats[0], status);
  assertFalse(file1 + " should be a file", status.isDirectory());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify the FileStatus returned by getFileStatus for a plain file:
 * kind, block size, replication, length, and fully-qualified path.
 */
@Test public void testGetFileStatusOnFile() throws Exception {
  checkFile(fs, file1, 1);
  final FileStatus st = fs.getFileStatus(file1);
  // A file, not a directory.
  assertFalse(file1 + " should be a file", st.isDirectory());
  // Block size, replication and length match what the file was created with.
  assertEquals(blockSize, st.getBlockSize());
  assertEquals(1, st.getReplication());
  assertEquals(fileSize, st.getLen());
  // The reported path is the fully-qualified form of file1.
  final String expectedPath =
      file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
  assertEquals(expectedPath, st.getPath().toString());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test calling getFileInfo directly on the DFSClient: directory status for
 * the root, null for a missing file, correct children counts, and the
 * rejection of non-absolute paths.
 */
@Test public void testGetFileInfo() throws IOException {
  // The root must report as a directory.
  Path path = new Path("/");
  assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
  // A missing file yields null rather than an exception.
  // BUGFIX: fixed "Non-existant" typo in the assertion message.
  HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
  assertEquals("Non-existent file should result in null", null, fileInfo);
  // Build /name1/name2 and check the reported children counts.
  Path path1 = new Path("/name1");
  Path path2 = new Path("/name1/name2");
  assertTrue(fs.mkdirs(path1));
  FSDataOutputStream out = fs.create(path2, false);
  out.close();
  fileInfo = dfsClient.getFileInfo(path1.toString());
  assertEquals(1, fileInfo.getChildrenNum());
  fileInfo = dfsClient.getFileInfo(path2.toString());
  assertEquals(0, fileInfo.getChildrenNum());
  // Relative paths are invalid at this layer and must be rejected.
  try {
    dfsClient.getFileInfo("non-absolute");
    fail("getFileInfo for a non-absolute path did not throw IOException");
  } catch (RemoteException re) {
    assertTrue("Wrong exception for invalid file name",
        re.toString().contains("Invalid file name"));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test FileStatus objects obtained from a directory, via both the
 * FileSystem and FileContext listing APIs, as files and subdirectories are
 * added. Listings come back in sorted order: directories dir3, dir4, dir5
 * before files file2, file3.
 */
@Test public void testGetFileStatusOnDir() throws Exception {
  // Fresh empty directory.
  Path dir = new Path("/test/mkdirs");
  assertTrue("mkdir failed", fs.mkdirs(dir));
  assertTrue("mkdir failed", fs.exists(dir));
  FileStatus status = fs.getFileStatus(dir);
  assertTrue(dir + " should be a directory", status.isDirectory());
  assertTrue(dir + " should be zero size ", status.getLen() == 0);
  assertEquals(dir.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(),
      status.getPath().toString());
  // An empty directory lists no entries and has zero content size.
  FileStatus[] stats = fs.listStatus(dir);
  assertEquals(dir + " should be empty", 0, stats.length);
  assertEquals(dir + " should be zero size ", 0, fs.getContentSummary(dir).getLength());
  // BUGFIX: parameterized iterator type instead of the raw type (raw
  // RemoteIterator.next() returns Object and would not compile below).
  RemoteIterator<FileStatus> itor = fc.listStatus(dir);
  assertFalse(dir + " should be empty", itor.hasNext());
  // Add file2 and verify its status.
  Path file2 = new Path(dir, "filestatus2.dat");
  DFSTestUtil.createFile(fs, file2, blockSize / 4, blockSize / 4, blockSize, (short)1, seed);
  checkFile(fs, file2, 1);
  status = fs.getFileStatus(file2);
  assertEquals(blockSize, status.getBlockSize());
  assertEquals(1, status.getReplication());
  file2 = fs.makeQualified(file2);
  assertEquals(file2.toString(), status.getPath().toString());
  // Add file3; directory content size is now two quarter-blocks.
  Path file3 = new Path(dir, "filestatus3.dat");
  DFSTestUtil.createFile(fs, file3, blockSize / 4, blockSize / 4, blockSize, (short)1, seed);
  checkFile(fs, file3, 1);
  file3 = fs.makeQualified(file3);
  final int expected = blockSize / 2;
  assertEquals(dir + " size should be " + expected, expected,
      fs.getContentSummary(dir).getLength());
  // Both listing APIs must return file2 then file3.
  stats = fs.listStatus(dir);
  assertEquals(dir + " should have two entries", 2, stats.length);
  assertEquals(file2.toString(), stats[0].getPath().toString());
  assertEquals(file3.toString(), stats[1].getPath().toString());
  itor = fc.listStatus(dir);
  assertEquals(file2.toString(), itor.next().getPath().toString());
  assertEquals(file3.toString(), itor.next().getPath().toString());
  assertFalse("Unexpected additional file", itor.hasNext());
  // Add subdirectory dir3; directories sort before files.
  // BUGFIX: dropped redundant second makeQualified() calls on dir3/4/5
  // (they were already qualified at construction).
  Path dir3 = fs.makeQualified(new Path(dir, "dir3"));
  fs.mkdirs(dir3);
  stats = fs.listStatus(dir);
  assertEquals(dir + " should have three entries", 3, stats.length);
  assertEquals(dir3.toString(), stats[0].getPath().toString());
  assertEquals(file2.toString(), stats[1].getPath().toString());
  assertEquals(file3.toString(), stats[2].getPath().toString());
  itor = fc.listStatus(dir);
  assertEquals(dir3.toString(), itor.next().getPath().toString());
  assertEquals(file2.toString(), itor.next().getPath().toString());
  assertEquals(file3.toString(), itor.next().getPath().toString());
  assertFalse("Unexpected additional file", itor.hasNext());
  // Add dir4 and dir5; all five entries come back in sorted order.
  Path dir4 = fs.makeQualified(new Path(dir, "dir4"));
  fs.mkdirs(dir4);
  Path dir5 = fs.makeQualified(new Path(dir, "dir5"));
  fs.mkdirs(dir5);
  stats = fs.listStatus(dir);
  assertEquals(dir + " should have five entries", 5, stats.length);
  assertEquals(dir3.toString(), stats[0].getPath().toString());
  assertEquals(dir4.toString(), stats[1].getPath().toString());
  assertEquals(dir5.toString(), stats[2].getPath().toString());
  assertEquals(file2.toString(), stats[3].getPath().toString());
  assertEquals(file3.toString(), stats[4].getPath().toString());
  itor = fc.listStatus(dir);
  assertEquals(dir3.toString(), itor.next().getPath().toString());
  assertEquals(dir4.toString(), itor.next().getPath().toString());
  assertEquals(dir5.toString(), itor.next().getPath().toString());
  assertEquals(file2.toString(), itor.next().getPath().toString());
  assertEquals(file3.toString(), itor.next().getPath().toString());
  assertFalse(itor.hasNext());
  fs.delete(dir, true);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test NamenodeProtocol#getBlocks: write a two-block file, poll until both
 * blocks are fully replicated, then ask the NameNode for blocks held by
 * one datanode under various size budgets, including invalid ones.
 */
@Test public void testGetBlocks() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  final short REPLICATION_FACTOR = (short)2;
  final int DEFAULT_BLOCK_SIZE = 1024;
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
  try {
    cluster.waitActive();
    // Create a file that spans exactly two blocks.
    long fileLen = 2 * DEFAULT_BLOCK_SIZE;
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L);
    // BUGFIX: parameterized list type instead of the raw type.
    List<LocatedBlock> locatedBlocks;
    DatanodeInfo[] dataNodes = null;
    boolean notWritten;
    // Poll until every block reports REPLICATION_FACTOR locations.
    do {
      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
      locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
      assertEquals(2, locatedBlocks.size());
      notWritten = false;
      for (int i = 0; i < 2; i++) {
        dataNodes = locatedBlocks.get(i).getLocations();
        if (dataNodes.length != REPLICATION_FACTOR) {
          notWritten = true;
          try {
            Thread.sleep(10);
          } catch (InterruptedException e) {
            // ignored: keep polling
          }
          break;
        }
      }
    } while (notWritten);
    // Ask the NameNode directly for blocks stored on the first datanode.
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, NameNode.getUri(addr), NamenodeProtocol.class).getProxy();
    BlockWithLocations[] locs;
    // A budget of the whole file returns both blocks.
    // BUGFIX: assertEquals arguments were reversed (expected comes first).
    locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
    assertEquals(2, locs.length);
    assertEquals(2, locs[0].getStorageIDs().length);
    assertEquals(2, locs[1].getStorageIDs().length);
    // A budget of one block size returns exactly one block.
    locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
    assertEquals(1, locs.length);
    assertEquals(2, locs[0].getStorageIDs().length);
    // Even a tiny positive budget returns at least one block.
    locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
    assertEquals(1, locs.length);
    assertEquals(2, locs[0].getStorageIDs().length);
    // Non-positive sizes and unknown datanodes must be rejected.
    getBlocksWithException(namenode, dataNodes[0], 0);
    getBlocksWithException(namenode, dataNodes[0], -1);
    DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
    getBlocksWithException(namenode, info, 2);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that the datanodes returned by
 * {@link ClientProtocol#getBlockLocations(String,long,long)} put stale
 * nodes last when stale-node checking is enabled -- both for a completed
 * block and for the last (under-construction) block of a file that is
 * still being written.
 * @throws Exception
 */
@Test public void testReadSelectNonStaleDatanode() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,true);
// 30-minute stale interval: nodes only become stale when this test
// explicitly backdates their last-update timestamp.
long staleInterval=30 * 1000 * 60;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,staleInterval);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).racks(racks).build();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
// NOTE(review): raw List -- presumably List<DatanodeDescriptor>; confirm
// against DatanodeManager#getDatanodeListForReport.
List nodeInfoList=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeListForReport(DatanodeReportType.LIVE);
assertEquals("Unexpected number of datanodes",numDatanodes,nodeInfoList.size());
FileSystem fileSys=cluster.getFileSystem();
FSDataOutputStream stm=null;
try {
// Write 1.5 blocks so the file has one completed block and an
// under-construction last block.
final Path fileName=new Path("/file1");
stm=fileSys.create(fileName,true,fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096),(short)3,blockSize);
stm.write(new byte[(blockSize * 3) / 2]);
stm.hflush();
LocatedBlocks blocks=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
DatanodeInfo[] nodes=blocks.get(0).getLocations();
assertEquals(nodes.length,3);
// Stop heartbeats on the first replica's node and backdate its last
// update so the NameNode considers it stale.
DataNode staleNode=null;
DatanodeDescriptor staleNodeInfo=null;
staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
assertNotNull(staleNode);
staleNodeInfo=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId());
staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
// The stale node must now be sorted to the end of the location list.
LocatedBlocks blocksAfterStale=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize);
DatanodeInfo[] nodesAfterStale=blocksAfterStale.get(0).getLocations();
assertEquals(nodesAfterStale.length,3);
assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName());
// Restore the node, then repeat the experiment on the last
// (under-construction) block of the still-open file.
DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode,false);
staleNodeInfo.setLastUpdate(Time.now());
LocatedBlock lastBlock=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
nodes=lastBlock.getLocations();
assertEquals(nodes.length,3);
staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName());
assertNotNull(staleNode);
cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
// Again, the stale replica must be listed last.
LocatedBlock lastBlockAfterStale=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock();
nodesAfterStale=lastBlockAfterStale.getLocations();
assertEquals(nodesAfterStale.length,3);
assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName());
}
finally {
if (stm != null) {
stm.close();
}
if (client != null) {
client.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify datanode port usage: startup must fail while any of its addresses
 * collides with a NameNode port, and succeed once all three datanode
 * addresses (transfer, HTTP, IPC) have their own ports.
 */
@Test public void testDataNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    final Configuration dnConf = new HdfsConfiguration(config);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
    // Case 1: datanode transfer address collides with the NameNode RPC
    // address -> must not start.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, FileSystem.getDefaultUri(config).getAuthority());
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    assertFalse(canStartDataNode(dnConf));
    // Case 2: datanode HTTP address collides with the NameNode HTTP
    // address -> must not start.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    assertFalse(canStartDataNode(dnConf));
    // Case 3: all three datanode addresses are free -> startup succeeds.
    dnConf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, THIS_HOST);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, THIS_HOST);
    assertTrue(canStartDataNode(dnConf));
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify BackupNode port usage: startup must fail when its HTTP address
 * collides with the NameNode's, and succeed once it has its own port.
 */
@Test public void testBackupNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    final Configuration backupConf = new HdfsConfiguration(config);
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
    // Attempt 1: backup HTTP address == NameNode HTTP address -> must fail.
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
        backupConf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: " + backupConf.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
    assertFalse("Backup started on same port as Namenode", canStartBackupNode(backupConf));
    // Attempt 2: its own port -> must start.
    backupConf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: " + backupConf.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
    final boolean started = canStartBackupNode(backupConf);
    assertTrue("Backup Namenode should've started", started);
  } finally {
    stopNameNode(nn);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify secondary namenode port usage: startup must fail when its HTTP
 * address collides with the NameNode's, and succeed on its own port.
 */
@Test public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();
    final Configuration secondaryConf = new HdfsConfiguration(config);
    // Attempt 1: reuse the NameNode HTTP address -> must fail.
    secondaryConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
        config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
    LOG.info("= Starting 1 on: " + secondaryConf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    assertFalse(canStartSecondaryNode(secondaryConf));
    // Attempt 2: its own port -> must start.
    secondaryConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
    LOG.info("= Starting 2 on: " + secondaryConf.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
    assertTrue(canStartSecondaryNode(secondaryConf));
  } finally {
    stopNameNode(nn);
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test hsync with SyncFlag.UPDATE_LENGTH before any data has been written:
 * the NameNode-visible file length must be 0.
 */
@Test public void hSyncUpdateLength_00() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    final Path file = new Path(fName);
    final FSDataOutputStream out =
        dfs.create(file, true, 4096, (short)2, AppendTestUtil.BLOCK_SIZE);
    System.out.println("Created file " + file.toString());
    // hsync before any write; UPDATE_LENGTH pushes the (zero) length to
    // the NameNode.
    ((DFSOutputStream)out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    final long visibleLength = dfs.getFileStatus(file).getLen();
    assertEquals(0L, visibleLength);
    out.close();
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we can set and clear namespace and space quotas via
 * {@link HdfsAdmin}, and that each operation leaves the other quota
 * untouched (-1 means "no quota set").
 */
@Test public void testHdfsAdminSetQuota() throws Exception {
  final HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  FileSystem fileSys = null;
  try {
    fileSys = FileSystem.get(conf);
    assertTrue(fileSys.mkdirs(TEST_PATH));
    // A fresh directory has neither quota set.
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Setting the space quota leaves the namespace quota unset.
    admin.setSpaceQuota(TEST_PATH, 10);
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Setting the namespace quota keeps the space quota.
    admin.setQuota(TEST_PATH, 10);
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Clearing the space quota keeps the namespace quota.
    admin.clearSpaceQuota(TEST_PATH);
    assertEquals(10, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
    // Clearing the namespace quota returns to the initial state.
    admin.clearQuota(TEST_PATH);
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getQuota());
    assertEquals(-1, fileSys.getContentSummary(TEST_PATH).getSpaceQuota());
  } finally {
    if (fileSys != null) {
      fileSys.close();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests the LeaseRenewer factory: DFSClients created by the same user
 * share one LeaseRenewer instance, while clients of different users get
 * distinct renewers.
 */
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception {
final String[] groups=new String[]{"supergroup"};
final UserGroupInformation[] ugi=new UserGroupInformation[3];
for (int i=0; i < ugi.length; i++) {
ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups);
}
// Stub the mocked NameNode protocol so getFileInfo/create return a dummy
// file status. NOTE(review): new FsPermission((short)777) uses DECIMAL
// 777, not octal 0777 -- harmless for mock data, but worth confirming.
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList());
final Configuration conf=new Configuration();
// Two clients as user0: they must share the same LeaseRenewer. The
// createFsOut calls open files so each client actually holds a lease.
final DFSClient c1=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out1=createFsOut(c1,"/out1");
final DFSClient c2=createDFSClientAs(ugi[0],conf);
FSDataOutputStream out2=createFsOut(c2,"/out2");
Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer());
// A client as user1 gets a different renewer than user0's clients.
final DFSClient c3=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out3=createFsOut(c3,"/out3");
Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
final DFSClient c4=createDFSClientAs(ugi[1],conf);
FSDataOutputStream out4=createFsOut(c4,"/out4");
Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer());
// user2 is distinct from both user0 and user1.
final DFSClient c5=createDFSClientAs(ugi[2],conf);
FSDataOutputStream out5=createFsOut(c5,"/out5");
Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test lease abort: after a fatal (hard-limit) lease renewal failure the
 * client's open writers must fail and the client must be evicted from the
 * LeaseRenewer, while reads and brand-new writes still work once renewal
 * succeeds again.
 */
@Test public void testLeaseAbort() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
cluster.waitActive();
// Spy on the NameNode RPC so renewLease() behavior can be scripted.
NamenodeProtocols preSpyNN=cluster.getNameNodeRpc();
NamenodeProtocols spyNN=spy(preSpyNN);
DFSClient dfs=new DFSClient(null,spyNN,conf,null);
byte[] buf=new byte[1024];
// One closed file ("c") for reading back, one open file ("d") to write.
FSDataOutputStream c_out=createFsOut(dfs,dirString + "c");
c_out.write(buf,0,1024);
c_out.close();
DFSInputStream c_in=dfs.open(dirString + "c");
FSDataOutputStream d_out=createFsOut(dfs,dirString + "d");
// Make every lease renewal fail with an InvalidToken error.
doThrow(new RemoteException(InvalidToken.class.getName(),"Your token is worthless")).when(spyNN).renewLease(anyString());
LeaseRenewer originalRenewer=dfs.getLeaseRenewer();
// Pretend the last successful renewal was just past the SOFT limit.
dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
try {
dfs.renewLease();
}
catch ( IOException e) {
// expected: renewal fails, but the lease is only soft-expired
}
// Soft-limit expiry alone must not kill the open writer.
try {
d_out.write(buf,0,1024);
LOG.info("Write worked beyond the soft limit as expected.");
}
catch ( IOException e) {
Assert.fail("Write failed.");
}
// Now push past the HARD limit: the failed renewal aborts the lease.
dfs.lastLeaseRenewal=Time.now() - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
dfs.renewLease();
try {
d_out.write(buf,0,1024);
d_out.close();
Assert.fail("Write did not fail even after the fatal lease renewal failure");
}
catch ( IOException e) {
LOG.info("Write failed as expected. ",e);
}
// Give the renewer daemon a moment to evict the aborted client.
Thread.sleep(1000);
Assert.assertTrue(originalRenewer.isEmpty());
// Renewal works again; reads and fresh writes must now succeed.
doNothing().when(spyNN).renewLease(anyString());
try {
int num=c_in.read(buf,0,1);
if (num != 1) {
Assert.fail("Failed to read 1 byte");
}
c_in.close();
}
catch ( IOException e) {
LOG.error("Read failed with ",e);
Assert.fail("Read after lease renewal failure failed");
}
try {
c_out=createFsOut(dfs,dirString + "c");
c_out.write(buf,0,1024);
c_out.close();
}
catch ( IOException e) {
LOG.error("Write failed with ",e);
Assert.fail("Write failed");
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we can open a file for write, move it to another location, and
 * then create a new file at the previous location without any lease
 * conflict -- leases track the file's unique inode ID, not its path.
 */
@Test public void testLeaseAfterRenameAndRecreate() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final Path origPath = new Path("/test-file");
    final String origContents = "contents1";
    final Path movedPath = new Path("/test-file-new-location");
    final String newContents = "contents2";
    // Open the original file and keep it open: a lease is held on its inode.
    final FileSystem fs = cluster.getFileSystem();
    final FSDataOutputStream openedOut = fs.create(origPath);
    openedOut.writeBytes(origContents);
    Assert.assertTrue(hasLease(cluster, origPath));
    Assert.assertEquals(1, leaseCount(cluster));
    // From a second client: rename the still-open file away, then create a
    // brand-new file at the old path.
    final DistributedFileSystem otherFs =
        (DistributedFileSystem)FileSystem.newInstance(fs.getUri(), fs.getConf());
    otherFs.rename(origPath, movedPath);
    final FSDataOutputStream recreatedOut = otherFs.create(origPath);
    recreatedOut.writeBytes(newContents);
    recreatedOut.close();
    // The original lease followed the rename to the new path.
    Assert.assertTrue(hasLease(cluster, movedPath));
    openedOut.close();
    // A third client sees the original bytes at the moved path and the new
    // bytes at the recreated path.
    final DistributedFileSystem readerFs =
        (DistributedFileSystem)FileSystem.newInstance(fs.getUri(), fs.getConf());
    Assert.assertEquals(origContents, DFSTestUtil.readFile(readerFs, movedPath));
    Assert.assertEquals(newContents, DFSTestUtil.readFile(readerFs, origPath));
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that leases follow files through renames: renaming an open file,
 * renaming its parent directory (with and without OVERWRITE) must always
 * leave exactly one lease, attached to the file's current path.
 */
@Test public void testLeaseAfterRename() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
Path p=new Path("/test-file");
Path d=new Path("/test-d");
Path d2=new Path("/test-d-other");
// Open /test-file for write so it holds a lease.
FileSystem fs=cluster.getFileSystem();
FSDataOutputStream out=fs.create(p);
out.writeBytes("something");
Assert.assertTrue(hasLease(cluster,p));
Assert.assertEquals(1,leaseCount(cluster));
// A second client performs all the renames below.
DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf());
LOG.info("DMS: rename file into dir");
Path pRenamed=new Path(d,p.getName());
fs2.mkdirs(d);
fs2.rename(p,pRenamed);
Assert.assertFalse(p + " exists",fs2.exists(p));
Assert.assertTrue(pRenamed + " not found",fs2.exists(pRenamed));
// The lease moved from the old path to the new one; still exactly one.
Assert.assertFalse("has lease for " + p,hasLease(cluster,p));
Assert.assertTrue("no lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertEquals(1,leaseCount(cluster));
LOG.info("DMS: rename parent dir");
Path pRenamedAgain=new Path(d2,pRenamed.getName());
fs2.rename(d,d2);
Assert.assertFalse(d + " exists",fs2.exists(d));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d2 + " not found",fs2.exists(d2));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
LOG.info("DMS: rename parent again");
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(new Path(d,d2.getName()),p.getName());
fs2.mkdirs(d);
fs2.rename(d2,d);
Assert.assertFalse(d2 + " exists",fs2.exists(d2));
// NOTE(review): the "no lease for" message is a copy/paste mismatch --
// this asserts the lease is GONE from the old path.
Assert.assertFalse("no lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d + " not found",fs2.exists(d));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
// Overwriting rename of the parent directory onto d2.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(d2,p.getName());
fs2.rename(pRenamed.getParent(),d2,Options.Rename.OVERWRITE);
// NOTE(review): the " not found" message is inverted for an assertFalse
// (the old parent should no longer exist).
Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d2 + " not found",fs2.exists(d2));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
// And back onto d, again with OVERWRITE.
pRenamed=pRenamedAgain;
pRenamedAgain=new Path(d,p.getName());
fs2.rename(pRenamed.getParent(),d,Options.Rename.OVERWRITE);
Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent()));
Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed));
Assert.assertTrue(d + " not found",fs2.exists(d));
Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain));
Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain));
Assert.assertEquals(1,leaseCount(cluster));
out.close();
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Basic lease bookkeeping: a lease exists exactly while a file is open for
 * write, and is released when the stream is closed.
 */
@Test public void testLease() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final FileSystem fs = cluster.getFileSystem();
    Assert.assertTrue(fs.mkdirs(dir));
    final Path pathA = new Path(dir, "a");
    final Path pathB = new Path(dir, "b");
    // Opening "a" creates a lease for it, and only for it.
    final DataOutputStream outA = fs.create(pathA);
    outA.writeBytes("something");
    Assert.assertTrue(hasLease(cluster, pathA));
    Assert.assertTrue(!hasLease(cluster, pathB));
    // Opening "b" adds a second lease.
    final DataOutputStream outB = fs.create(pathB);
    outB.writeBytes("something");
    Assert.assertTrue(hasLease(cluster, pathA));
    Assert.assertTrue(hasLease(cluster, pathB));
    // Closing both streams releases both leases.
    outA.close();
    outB.close();
    Assert.assertTrue(!hasLease(cluster, pathA));
    Assert.assertTrue(!hasLease(cluster, pathB));
    fs.delete(dir, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * The following test first creates a file with a few blocks.
 * It randomly truncates the replica of the last block stored in each datanode.
 * Finally, it triggers block synchronization to synchronize all stored block.
 */
@Test public void testBlockSynchronization() throws Exception {
final int ORG_FILE_SIZE=3000;
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
cluster.waitActive();
// Create a multi-block file and wait until it is fully replicated.
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,ORG_FILE_SIZE,REPLICATION_NUM,0L);
assertTrue(dfs.exists(filepath));
DFSTestUtil.waitReplication(dfs,filepath,REPLICATION_NUM);
// Locate the last block and the datanodes holding its replicas.
LocatedBlock locatedblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr);
DatanodeInfo[] datanodeinfos=locatedblock.getLocations();
assertEquals(REPLICATION_NUM,datanodeinfos.length);
DataNode[] datanodes=new DataNode[REPLICATION_NUM];
for (int i=0; i < REPLICATION_NUM; i++) {
datanodes[i]=cluster.getDataNode(datanodeinfos[i].getIpcPort());
assertTrue(datanodes[i] != null);
}
ExtendedBlock lastblock=locatedblock.getBlock();
DataNode.LOG.info("newblocks=" + lastblock);
// Sanity-check each replica's on-disk metadata before recovery runs.
for (int i=0; i < REPLICATION_NUM; i++) {
checkMetaInfo(lastblock,datanodes[i]);
}
// Re-open for append (without writing) so the file goes back under
// construction, then wait for lease recovery to be triggered.
DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
cluster.getNameNodeRpc().append(filestr,dfs.dfs.clientName);
waitLeaseRecovery(cluster);
// After recovery every replica must agree on block id, length and
// generation stamp with the namenode's view of the last block.
Block[] updatedmetainfo=new Block[REPLICATION_NUM];
long oldSize=lastblock.getNumBytes();
lastblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr).getBlock();
long currentGS=lastblock.getGenerationStamp();
for (int i=0; i < REPLICATION_NUM; i++) {
updatedmetainfo[i]=DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(),lastblock.getBlockId());
assertEquals(lastblock.getBlockId(),updatedmetainfo[i].getBlockId());
assertEquals(oldSize,updatedmetainfo[i].getNumBytes());
assertEquals(currentGS,updatedmetainfo[i].getGenerationStamp());
}
// While the namenode is in safemode, lease recovery must not run: the
// lease count for the newly created under-construction file stays at 1.
System.out.println("Testing that lease recovery cannot happen during safemode.");
filestr="/foo.safemode";
filepath=new Path(filestr);
dfs.create(filepath,(short)1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,false);
assertTrue(dfs.dfs.exists(filestr));
DFSTestUtil.waitReplication(dfs,filepath,(short)1);
waitLeaseRecovery(cluster);
LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
assertTrue("Found " + lm.countLease() + " lease, expected 1",lm.countLease() == 1);
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Block recovery when the meta file does not contain CRCs for every chunk in
 * the block file: truncate the meta file, restart the datanode so it reloads
 * the truncated metadata, and verify the lease can still be recovered.
 */
@Test public void testBlockRecoveryWithLessMetafile() throws Exception {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    Path file = new Path("/testRecoveryFile");
    DistributedFileSystem dfs = cluster.getFileSystem();
    FSDataOutputStream out = dfs.create(file);
    // Write 2MB of data in 4-byte chunks.
    int count = 0;
    while (count < 2 * 1024 * 1024) {
      out.writeBytes("Data");
      count += 4;
    }
    out.hsync();
    // Abort without closing so the file is left under construction.
    ((DFSOutputStream) out.getWrappedStream()).abort();
    LocatedBlocks locations =
        cluster.getNameNodeRpc().getBlockLocations(file.toString(), 0, count);
    ExtendedBlock block = locations.get(0).getBlock();
    DataNode dn = cluster.getDataNodes().get(0);
    BlockLocalPathInfo localPathInfo = dn.getBlockLocalPathInfo(block, null);
    File metafile = new File(localPathInfo.getMetaPath());
    assertTrue(metafile.exists());
    // Chop 20 bytes off the meta file so it no longer covers every chunk
    // of the block file. Close the handle even if setLength() throws
    // (the original leaked it on exception).
    RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
    try {
      raf.setLength(metafile.length() - 20);
    } finally {
      raf.close();
    }
    // Restart the datanode so it reloads the truncated meta file.
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.restartDataNode(dnProp, true);
    DistributedFileSystem newdfs =
        (DistributedFileSystem) FileSystem.newInstance(cluster.getConfiguration(0));
    try {
      // Lease recovery may need several attempts while the truncated block
      // is being recovered; poll up to ~10s.
      count = 0;
      while (++count < 10 && !newdfs.recoverLease(file)) {
        Thread.sleep(1000);
      }
      assertTrue("File should be closed", newdfs.recoverLease(file));
    } finally {
      // The original test leaked this client handle.
      newdfs.close();
    }
  } finally {
    // The original test never shut the cluster down.
    cluster.shutdown();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the hard lease expiration period to be short 1s. Thus triggering
 * lease expiration to happen while the client is still alive.
 * The test makes sure that the lease recovery completes and the client
 * fails if it continues to write to the file.
 * @throws Exception
 */
@Test public void testHardLeaseRecovery() throws Exception {
String filestr="/hardLeaseRecovery";
AppendTestUtil.LOG.info("filestr=" + filestr);
Path filepath=new Path(filestr);
FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE);
assertTrue(dfs.dfs.exists(filestr));
// Write a random amount of data and hflush so it reaches the pipeline.
int size=AppendTestUtil.nextInt(FILE_SIZE);
AppendTestUtil.LOG.info("size=" + size);
stm.write(buffer,0,size);
AppendTestUtil.LOG.info("hflush");
stm.hflush();
// Stop the client's lease renewer so the lease is never renewed again.
AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
dfs.dfs.getLeaseRenewer().interruptAndJoin();
// Shrink the hard limit so the lease expires while the client is alive.
cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD);
// Poll until the namenode finishes lease recovery and closes the file.
LocatedBlocks locatedBlocks;
do {
Thread.sleep(SHORT_LEASE_PERIOD);
locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size);
}
while (locatedBlocks.isUnderConstruction());
assertEquals(size,locatedBlocks.getFileLength());
// The original writer must now fail: its lease has been recovered away.
try {
stm.write('b');
stm.close();
fail("Writer thread should have been killed");
}
catch ( IOException e) {
// expected: lease expired for the old writer
e.printStackTrace();
}
AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * This test makes the client does not renew its lease and also
 * set the soft lease expiration period to be short 1s. Thus triggering
 * soft lease expiration to happen immediately by having another client
 * trying to create the same file.
 * The test makes sure that the lease recovery completes.
 * @throws Exception
 */
@Test public void testSoftLeaseRecovery() throws Exception {
  // Parameterized map (was raw HashMap) of fake user -> groups used to
  // build a second client identity.
  Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
  u2g_map.put(fakeUsername, new String[]{fakeGroup});
  DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
  // Use the normal lease periods while the first client writes.
  cluster.setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  String filestr = "/foo" + AppendTestUtil.nextInt();
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));
  // Write a random amount of data and hflush it into the pipeline.
  int size = AppendTestUtil.nextInt(FILE_SIZE);
  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer, 0, size);
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();
  // Kill the client's lease renewer, then shrink the soft limit so the
  // lease becomes reclaimable immediately.
  AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
  dfs.dfs.getLeaseRenewer().interruptAndJoin();
  cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
  {
    // A second client (the fake user) tries to create the same file, which
    // triggers soft-lease recovery on the namenode.
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(fakeUsername, new String[]{fakeGroup});
    FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
    boolean done = false;
    for (int i = 0; i < 10 && !done; i++) {
      AppendTestUtil.LOG.info("i=" + i);
      try {
        dfs2.create(filepath, false, BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
        fail("Creation of an existing file should never succeed.");
      } catch (FileAlreadyExistsException ex) {
        // Recovery finished: the file now exists as a closed file.
        done = true;
      } catch (AlreadyBeingCreatedException ex) {
        // Recovery still in progress; retry after a pause.
        AppendTestUtil.LOG.info("GOOD! got " + ex.getMessage());
      } catch (IOException ioe) {
        AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
      }
      if (!done) {
        AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
        try {
          Thread.sleep(5000);
        } catch (InterruptedException e) {
        }
      }
    }
    assertTrue(done);
  }
  // Recovery done: the recovered file must contain exactly what the first
  // client hflushed.
  AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "+ "Validating its contents now...");
  long fileSize = dfs.getFileStatus(filepath).getLen();
  assertTrue("File should be " + size + " bytes, but is actually "+ " found to be "+ fileSize+ " bytes", fileSize == size);
  AppendTestUtil.LOG.info("File size is good. " + "Now validating data and sizes from datanodes...");
  AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks the renewer daemon's thread name, that the daemon starts when a
 * file is registered, and that it stops after the file is closed and the
 * empty-grace timer expires.
 */
@Test public void testThreadName() throws Exception {
  DFSOutputStream stream = Mockito.mock(DFSOutputStream.class);
  long inodeId = 789L;
  Assert.assertFalse("Renewer not initially running", renewer.isRunning());

  // Registering an open file should start the renewer daemon.
  renewer.put(inodeId, stream, MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running", renewer.isRunning());

  // The daemon name encodes the user and the filesystem authority.
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", renewer.getDaemonName());

  // Close the file and force the empty-grace timer to expire right now.
  renewer.closeFile(inodeId, MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.now());

  // Wait up to five seconds for the daemon to notice and exit.
  long deadline = Time.now() + 5000;
  while (renewer.isRunning() && Time.now() < deadline) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}
InternalCallVerifier IdentityVerifier
/**
 * Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
 * to several DFSClients with the same name, the first of which has no files
 * open. Previously, this was causing the lease to not get renewed.
 */
@Test public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
  // Client 1: renewLease() reports no files open (returns false).
  final DFSClient mockClient1 = createMockClient();
  Mockito.doReturn(false).when(mockClient1).renewLease();
  assertSame(renewer, LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
  DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
  long fileId = 456L;
  renewer.put(fileId, mockStream1, mockClient1);

  // Client 2: renewLease() succeeds; both clients share the same renewer.
  final DFSClient mockClient2 = createMockClient();
  Mockito.doReturn(true).when(mockClient2).renewLease();
  assertSame(renewer, LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
  DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
  renewer.put(fileId, mockStream2, mockClient2);

  // Both clients must eventually see a renewal attempt, even though the
  // first one has no open files. Supplier is parameterized (was raw).
  GenericTestUtils.waitFor(new Supplier<Boolean>(){
    @Override public Boolean get() {
      try {
        Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
        Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
        return true;
      } catch (AssertionError err) {
        // Not renewed yet; waitFor() will retry.
        LeaseRenewer.LOG.warn("Not yet satisfied", err);
        return false;
      } catch (IOException e) {
        // Should never happen when calling a mock.
        throw new RuntimeException(e);
      }
    }
  }, 100, 10000);

  renewer.closeFile(fileId, mockClient1);
  renewer.closeFile(fileId, mockClient2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test listFiles() when the input path is a single file: both recursive and
 * non-recursive listings must return exactly that file.
 */
@Test public void testFile() throws IOException {
  fc.mkdir(TEST_DIR, FsPermission.getDefault(), true);
  writeFile(fc, FILE1, FILE_LEN);

  // RemoteIterator is parameterized (was raw): next() must return
  // LocatedFileStatus for the assignment below to compile.
  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(FILE1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);

  // Non-recursive listing behaves identically for a plain file.
  itor = fc.util().listFiles(FILE1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test listFiles() when the input path is a directory: empty directory, a
 * single contained file, and recursive vs. non-recursive traversal of a
 * tree with nested files.
 */
@Test public void testDirectory() throws IOException {
  fc.mkdir(DIR1, FsPermission.getDefault(), true);

  // An empty directory yields no entries, recursively or not.
  // RemoteIterator is parameterized (was raw): next() must return
  // LocatedFileStatus for the assignments below to compile.
  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(DIR1, true);
  assertFalse(itor.hasNext());
  itor = fc.util().listFiles(DIR1, false);
  assertFalse(itor.hasNext());

  // With one file inside, both modes return exactly that file.
  writeFile(fc, FILE2, FILE_LEN);
  itor = fc.util().listFiles(DIR1, true);
  LocatedFileStatus stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);
  itor = fc.util().listFiles(DIR1, false);
  stat = itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN, stat.getLen());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  assertEquals(1, stat.getBlockLocations().length);

  // Recursive listing of the parent finds files in subdirectories too;
  // non-recursive listing only sees the directly contained file.
  writeFile(fc, FILE1, FILE_LEN);
  writeFile(fc, FILE3, FILE_LEN);
  itor = fc.util().listFiles(TEST_DIR, true);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE3), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
  itor = fc.util().listFiles(TEST_DIR, false);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test listFiles() when the input path has symbolic links among its
 * children: links are resolved, and recursion controls whether files
 * reached through a directory symlink are reported.
 */
@Test public void testSymbolicLinks() throws IOException {
  writeFile(fc, FILE1, FILE_LEN);
  writeFile(fc, FILE2, FILE_LEN);
  writeFile(fc, FILE3, FILE_LEN);
  Path dir4 = new Path(TEST_DIR, "dir4");
  Path dir5 = new Path(dir4, "dir5");
  Path file4 = new Path(dir4, "file4");
  // dir5 links to DIR1 (a directory); file4 links to FILE1 (a file).
  fc.createSymlink(DIR1, dir5, true);
  fc.createSymlink(FILE1, file4, true);

  // RemoteIterator is parameterized (was raw): next() must return
  // LocatedFileStatus for the assignments below to compile.
  RemoteIterator<LocatedFileStatus> itor = fc.util().listFiles(dir4, true);
  LocatedFileStatus stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE2), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE3), stat.getPath());
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());

  // Non-recursive listing resolves only the file symlink, not the
  // contents of the linked directory.
  itor = fc.util().listFiles(dir4, false);
  stat = itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1), stat.getPath());
  assertFalse(itor.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests get/set working directory in DFS: relative paths resolve against the
 * current working directory, which may itself be set absolutely or
 * relatively, and the home directory follows the /user/&lt;name&gt; convention.
 */
@Test(timeout=20000) public void testWorkingDirectory() throws IOException {
  Configuration config = new HdfsConfiguration();
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).build();
  FileSystem fileSys = miniCluster.getFileSystem();
  try {
    // The initial working directory must be absolute.
    Path initialWd = fileSys.getWorkingDirectory();
    assertTrue(initialWd.isAbsolute());

    // A relative path resolves against the current working directory.
    Path relFile = new Path("somewhat/random.txt");
    writeFile(fileSys, relFile);
    assertTrue(fileSys.exists(new Path(initialWd, relFile.toString())));
    fileSys.delete(relFile, true);

    // After switching to an absolute working directory, relative creates
    // land underneath it.
    Path absoluteDir = new Path("/somewhere");
    fileSys.setWorkingDirectory(absoluteDir);
    writeFile(fileSys, relFile);
    cleanupFile(fileSys, new Path(absoluteDir, relFile.toString()));

    // A relative working directory is resolved against the previous one.
    Path relativeDir = new Path("else");
    fileSys.setWorkingDirectory(relativeDir);
    writeFile(fileSys, relFile);
    readFile(fileSys, relFile);
    cleanupFile(fileSys,
        new Path(new Path(absoluteDir, relativeDir.toString()), relFile.toString()));

    // The home directory follows the /user/<name> convention.
    Path expectedHome = fileSys.makeQualified(new Path("/user/" + getUserName(fileSys)));
    assertEquals(expectedHome, fileSys.getHomeDirectory());
  } finally {
    fileSys.close();
    miniCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Bring up two clusters and assert that they are in different directories.
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testDualClusters() throws Throwable {
  File baseDir2 = new File(testDataPath, CLUSTER_2);
  File baseDir3 = new File(testDataPath, CLUSTER_3);
  Configuration conf = new HdfsConfiguration();
  String basePath2 = baseDir2.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, basePath2);
  MiniDFSCluster secondCluster = new MiniDFSCluster.Builder(conf).build();
  MiniDFSCluster thirdCluster = null;
  try {
    // The first cluster's data directory is rooted at its configured base.
    String dataDir2 = secondCluster.getDataDirectory();
    assertEquals(new File(basePath2 + "/data"), new File(dataDir2));
    // Point the same Configuration at a different base and start another
    // cluster; the two must not share a data directory.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir3.getAbsolutePath());
    thirdCluster = new MiniDFSCluster.Builder(conf).build();
    String dataDir3 = thirdCluster.getDataDirectory();
    assertTrue("Clusters are bound to the same directory: " + dataDir2,
        !dataDir2.equals(dataDir3));
  } finally {
    MiniDFSCluster.shutdownCluster(thirdCluster);
    MiniDFSCluster.shutdownCluster(secondCluster);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that without system properties the cluster still comes up, provided
 * the configuration is set
 * @throws Throwable on a failure
 */
@Test(timeout=100000) public void testClusterWithoutSystemProperties() throws Throwable {
  // Remove the system-property override so only the configuration decides
  // where the cluster's data lives.
  System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
  Configuration conf = new HdfsConfiguration();
  String basePath = new File(testDataPath, CLUSTER_1).getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, basePath);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    // The data directory must be derived from the configured base dir.
    assertEquals(new File(basePath + "/data"), new File(cluster.getDataDirectory()));
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * MiniDFSCluster should not clobber dfs.datanode.hostname if requested
 */
@Test(timeout=100000) public void testClusterSetDatanodeHostname() throws Throwable {
  // Only exercised on Linux.
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
  File baseDir = new File(testDataPath, CLUSTER_5);
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .checkDataNodeHostConfig(true)
      .build();
  try {
    // The datanode must report the configured hostname, not a resolved one.
    assertEquals("DataNode hostname config not respected", "MYHOST",
        cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
  } finally {
    MiniDFSCluster.shutdownCluster(cluster5);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Corrupts the only replica of a file's first block, verifies that the
 * missing/under-replicated counters and the NameNodeInfo MBean report it,
 * then deletes the file and verifies the counters drop back.
 */
@Test public void testMissingBlocksAlert() throws IOException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
// Run replication work continuously and shorten client retry windows.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,0);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
// Two blocks per file: block size is half the file length.
int fileLen=10 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,fileLen / 2);
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final BlockManager bm=cluster.getNamesystem().getBlockManager();
DistributedFileSystem dfs=cluster.getFileSystem();
// One healthy file plus one whose replica we will corrupt. Replication 3
// is requested; with the builder's default datanode count the blocks
// stay under-replicated, which the counts below rely on.
DFSTestUtil.createFile(dfs,new Path("/testMissingBlocksAlert/file1"),fileLen,(short)3,0);
Path corruptFile=new Path("/testMissingBlocks/corruptFile");
DFSTestUtil.createFile(dfs,corruptFile,fileLen,(short)3,0);
// Corrupt replica 0 of the first block, then read the file so the
// checksum failure is detected and reported.
ExtendedBlock block=DFSTestUtil.getFirstBlock(dfs,corruptFile);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0));
FSDataInputStream in=dfs.open(corruptFile);
try {
in.readFully(new byte[fileLen]);
}
catch ( ChecksumException ignored) {
// expected: the replica was corrupted above
}
in.close();
LOG.info("Waiting for missing blocks count to increase...");
while (dfs.getMissingBlocksCount() <= 0) {
Thread.sleep(100);
}
assertTrue(dfs.getMissingBlocksCount() == 1);
// Four blocks are under-replicated in total; three of them still have a
// live replica (i.e. are not missing).
assertEquals(4,dfs.getUnderReplicatedBlocksCount());
assertEquals(3,bm.getUnderReplicatedNotMissingBlocks());
// The same missing-block count must be visible via the JMX MBean.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
Assert.assertEquals(1,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks"));
// Deleting the corrupt file should clear the missing-block alert.
dfs.delete(corruptFile,true);
LOG.info("Waiting for missing blocks count to be zero...");
while (dfs.getMissingBlocksCount() > 0) {
Thread.sleep(100);
}
assertEquals(2,dfs.getUnderReplicatedBlocksCount());
assertEquals(2,bm.getUnderReplicatedNotMissingBlocks());
Assert.assertEquals(0,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks"));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Regression test for HDFS-3864 - NN does not update internal file mtime for
 * OP_CLOSE when reading from the edit log.
 */
@Test public void testModTimePersistsAfterRestart() throws IOException {
  final long sleepTime = 10;
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();
    Path testPath = new Path("/test");

    // Creating the file stamps an initial modification time.
    OutputStream out = fs.create(testPath);
    long mtimeAtCreate = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(mtimeAtCreate > 0);

    // Sleep so the close happens at a strictly later timestamp.
    ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
    out.close();
    long mtimeAtClose = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(mtimeAtClose >= mtimeAtCreate + sleepTime);

    // The close-time mtime must survive the edit-log replay on restart.
    cluster.restartNameNode();
    assertEquals(mtimeAtClose, fs.getFileStatus(testPath).getModificationTime());
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests modification time in DFS.
 */
@Test public void testModTime() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
int replicas=numDatanodes - 1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
// Creating a file gives it a nonzero mtime; remember the parent's too.
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1=new Path("testdir1");
Path file1=new Path(dir1,"test1.dat");
DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)replicas,seed);
FileStatus stat=fileSys.getFileStatus(file1);
long mtime1=stat.getModificationTime();
assertTrue(mtime1 != 0);
stat=fileSys.getFileStatus(dir1);
long mdir1=stat.getModificationTime();
// Adding a second file must not move the directory's mtime backwards.
System.out.println("Creating testdir1/test2.dat.");
Path file2=new Path(dir1,"test2.dat");
DFSTestUtil.createFile(fileSys,file2,fileSize,fileSize,blockSize,(short)replicas,seed);
stat=fileSys.getFileStatus(file2);
stat=fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() >= mdir1);
mdir1=stat.getModificationTime();
Path dir2=fileSys.makeQualified(new Path("testdir2/"));
System.out.println("Creating testdir2 " + dir2);
assertTrue(fileSys.mkdirs(dir2));
stat=fileSys.getFileStatus(dir2);
long mdir2=stat.getModificationTime();
// Renaming a file keeps the file's own mtime but updates the mtimes of
// both the source and destination directories.
Path newfile=new Path(dir2,"testnew.dat");
System.out.println("Moving " + file1 + " to "+ newfile);
fileSys.rename(file1,newfile);
stat=fileSys.getFileStatus(newfile);
assertTrue(stat.getModificationTime() == mtime1);
stat=fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() != mdir1);
mdir1=stat.getModificationTime();
stat=fileSys.getFileStatus(dir2);
assertTrue(stat.getModificationTime() != mdir2);
mdir2=stat.getModificationTime();
// Deleting a file updates only its parent directory's mtime.
System.out.println("Deleting testdir2/testnew.dat.");
assertTrue(fileSys.delete(newfile,true));
stat=fileSys.getFileStatus(dir1);
assertTrue(stat.getModificationTime() == mdir1);
stat=fileSys.getFileStatus(dir2);
assertTrue(stat.getModificationTime() != mdir2);
mdir2=stat.getModificationTime();
cleanupFile(fileSys,file2);
cleanupFile(fileSys,dir1);
cleanupFile(fileSys,dir2);
}
catch ( IOException e) {
// Dump a full datanode report to aid debugging, then rethrow.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Fill the peer cache past its capacity and verify eviction: the size stays
 * at capacity, the oldest entry is gone, and the rest are still retrievable
 * and open.
 */
@Test public void testEviction() throws Exception {
  final int capacity = 3;
  PeerCache cache = new PeerCache(capacity, 100000);
  DatanodeID[] ids = new DatanodeID[capacity + 1];
  FakePeer[] fakePeers = new FakePeer[capacity + 1];
  for (int i = 0; i < ids.length; ++i) {
    ids[i] = new DatanodeID("192.168.0.1", "fakehostname_" + i,
        "fake_datanode_id_" + i, 100, 101, 102, 103);
    fakePeers[i] = new FakePeer(ids[i], false);
  }
  // Fill the cache exactly to capacity.
  for (int i = 0; i < capacity; ++i) {
    cache.put(ids[i], fakePeers[i]);
  }
  assertEquals(capacity, cache.size());
  // One more insert keeps the size constant and evicts the oldest peer.
  cache.put(ids[capacity], fakePeers[capacity]);
  assertEquals(capacity, cache.size());
  assertSame(null, cache.get(ids[0], false));
  // The remaining original peers are still present and open.
  for (int i = 1; i < capacity; ++i) {
    Peer retrieved = cache.get(ids[i], false);
    assertSame(fakePeers[i], retrieved);
    assertTrue(!retrieved.isClosed());
    retrieved.close();
  }
  assertEquals(1, cache.size());
  cache.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Cache several peers under one datanode key where only one has a domain
 * socket: a domain-socket lookup must return exactly that peer once, and
 * the rest must remain retrievable via normal lookups.
 */
@Test public void testDomainSocketPeers() throws Exception {
  final int capacity = 3;
  PeerCache cache = new PeerCache(capacity, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  HashMultiset pending = HashMultiset.create(capacity);
  // Only the last inserted peer is created with a domain socket.
  for (int i = 0; i < capacity; ++i) {
    FakePeer fake = new FakePeer(dnId, i == capacity - 1);
    pending.add(fake);
    cache.put(dnId, fake);
  }
  assertEquals(capacity, cache.size());
  // The sole domain-socket peer is returned first...
  Peer peer = cache.get(dnId, true);
  assertTrue(peer.getDomainSocket() != null);
  pending.remove(peer);
  // ...and a second domain-socket request finds nothing.
  peer = cache.get(dnId, true);
  assertTrue(peer == null);
  // The remaining peers are still retrievable without a domain socket.
  while (!pending.isEmpty()) {
    peer = cache.get(dnId, false);
    assertTrue(peer != null);
    assertTrue(!peer.isClosed());
    pending.remove(peer);
  }
  assertEquals(0, cache.size());
  cache.close();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A cached peer can be retrieved exactly once: get() removes the entry from
 * the cache without closing the peer.
 */
@Test public void testAddAndRetrieve() throws Exception {
  PeerCache cache = new PeerCache(3, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  FakePeer cachedPeer = new FakePeer(dnId, false);
  cache.put(dnId, cachedPeer);
  // Caching must not close the peer.
  assertTrue(!cachedPeer.isClosed());
  assertEquals(1, cache.size());
  // Retrieval returns the same peer and empties the cache.
  assertEquals(cachedPeer, cache.get(dnId, false));
  assertEquals(0, cache.size());
  cache.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Peers left in the cache longer than the expiry period are evicted and
 * closed by the cache's background cleanup.
 */
@Test public void testExpiry() throws Exception {
  final int capacity = 3;
  final int expiryMillis = 10;
  PeerCache cache = new PeerCache(capacity, expiryMillis);
  DatanodeID[] ids = new DatanodeID[capacity];
  FakePeer[] fakePeers = new FakePeer[capacity];
  for (int i = 0; i < capacity; ++i) {
    ids[i] = new DatanodeID("192.168.0.1", "fakehostname_" + i,
        "fake_datanode_id", 100, 101, 102, 103);
    fakePeers[i] = new FakePeer(ids[i], false);
  }
  for (int i = 0; i < capacity; ++i) {
    cache.put(ids[i], fakePeers[i]);
  }
  // Wait well past the expiry period: everything should be evicted...
  Thread.sleep(expiryMillis * 50);
  assertEquals(0, cache.size());
  // ...and each evicted peer must have been closed.
  for (int i = 0; i < capacity; ++i) {
    assertTrue(fakePeers[i].isClosed());
  }
  // Give the expiry daemon one more cycle before shutting the cache down.
  Thread.sleep(expiryMillis * 50);
  cache.close();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Several peers cached under the same datanode key must all be individually
 * retrievable, each exactly once and still open.
 */
@Test public void testMultiplePeersWithSameKey() throws Exception {
  final int capacity = 3;
  PeerCache cache = new PeerCache(capacity, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
      "fake_datanode_id", 100, 101, 102, 103);
  HashMultiset pending = HashMultiset.create(capacity);
  for (int i = 0; i < capacity; ++i) {
    FakePeer fake = new FakePeer(dnId, false);
    pending.add(fake);
    cache.put(dnId, fake);
  }
  assertEquals(capacity, cache.size());
  // Drain the cache one peer at a time; each get must yield a distinct,
  // still-open peer that was previously inserted.
  while (!pending.isEmpty()) {
    Peer retrieved = cache.get(dnId, false);
    assertTrue(retrieved != null);
    assertTrue(!retrieved.isClosed());
    pending.remove(retrieved);
  }
  assertEquals(0, cache.size());
  cache.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Write most of a multi-block file, abandon its last (in-progress) block,
 * restart the namenode, and verify the file length and contents reflect the
 * abandoned block being dropped.
 */
@Test public void testRestartDfsWithAbandonedBlock() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Drop idle IPC connections immediately so the restart path is exercised.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
MiniDFSCluster cluster=null;
long len=0;
FSDataOutputStream stream;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART);
stream.hflush();
// Wait until all but the last block are visible to readers.
while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
FileStatus status=fs.getFileStatus(FILE_PATH);
len=status.getLen();
Thread.sleep(100);
}
// Abandon the last, still-under-construction block via the namenode RPC.
DFSClient dfsclient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
HdfsFileStatus fileStatus=dfsclient.getNamenode().getFileInfo(FILE_NAME);
LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(FILE_NAME,0,BLOCK_SIZE * NUM_BLOCKS);
assertEquals(NUM_BLOCKS,blocks.getLocatedBlocks().size());
LocatedBlock b=blocks.getLastLocatedBlock();
dfsclient.getNamenode().abandonBlock(b.getBlock(),fileStatus.getFileId(),FILE_NAME,dfsclient.clientName);
cluster.restartNameNode();
// After the restart the file must be exactly one block shorter.
FileStatus status=fs.getFileStatus(FILE_PATH);
assertTrue("Length incorrect: " + status.getLen(),status.getLen() == len - BLOCK_SIZE);
// The surviving prefix must match what was written before the restart.
FSDataInputStream readStream=fs.open(FILE_PATH);
try {
byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
byte[] expectedBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
System.arraycopy(DATA_BEFORE_RESTART,0,expectedBuf,0,expectedBuf.length);
assertArrayEquals(expectedBuf,verifyBuf);
}
finally {
IOUtils.closeStream(readStream);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Hflush a partially-written block, restart the namenode while the stream
 * is still open, then continue writing and close; the full contents must
 * survive the restart.
 */
@Test public void testRestartWithPartialBlockHflushed() throws IOException {
final Configuration conf=new HdfsConfiguration();
// Drop idle IPC connections immediately so the restart path is exercised.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
MiniDFSCluster cluster=null;
FSDataOutputStream stream;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
// NOTE(review): result is discarded — looks like a leftover call with no
// effect on the test; confirm before removing.
NameNode.getAddress(conf).getPort();
stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART);
// Byte 1 is hflushed into a partial block before the restart.
stream.write((byte)1);
stream.hflush();
cluster.restartNameNode();
// Byte 2 is written after the restart, then the file is closed.
stream.write((byte)2);
stream.hflush();
stream.close();
assertEquals(DATA_BEFORE_RESTART.length + 2,fs.getFileStatus(FILE_PATH).getLen());
// Read everything back and compare byte-for-byte.
FSDataInputStream readStream=fs.open(FILE_PATH);
try {
byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length + 2];
IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
byte[] expectedBuf=new byte[DATA_BEFORE_RESTART.length + 2];
System.arraycopy(DATA_BEFORE_RESTART,0,expectedBuf,0,DATA_BEFORE_RESTART.length);
System.arraycopy(new byte[]{1,2},0,expectedBuf,DATA_BEFORE_RESTART.length,2);
assertArrayEquals(expectedBuf,verifyBuf);
}
finally {
IOUtils.closeStream(readStream);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Write half a file via create(), append the second half, close, then
 * restart the namenode; length and contents must be identical before and
 * after the restart.
 */
@Test public void testRestartWithAppend() throws IOException {
final Configuration conf=new HdfsConfiguration();
// Drop idle IPC connections immediately so the restart path is exercised.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
MiniDFSCluster cluster=null;
FSDataOutputStream stream;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
// NOTE(review): result is discarded — looks like a leftover call with no
// effect on the test; confirm before removing.
NameNode.getAddress(conf).getPort();
// First half of the data via create(), second half via append().
stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART,0,DATA_BEFORE_RESTART.length / 2);
stream.close();
stream=fs.append(FILE_PATH,BLOCK_SIZE);
stream.write(DATA_BEFORE_RESTART,DATA_BEFORE_RESTART.length / 2,DATA_BEFORE_RESTART.length / 2);
stream.close();
assertEquals(DATA_BEFORE_RESTART.length,fs.getFileStatus(FILE_PATH).getLen());
cluster.restartNameNode();
// The appended length must survive the edit-log replay on restart.
assertEquals(DATA_BEFORE_RESTART.length,fs.getFileStatus(FILE_PATH).getLen());
// Read everything back and compare byte-for-byte.
FSDataInputStream readStream=fs.open(FILE_PATH);
try {
byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length];
IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
assertArrayEquals(DATA_BEFORE_RESTART,verifyBuf);
}
finally {
IOUtils.closeStream(readStream);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates and closes a file of certain length.
 * Calls append to allow next write() operation to add to the end of it
 * After write() invocation, calls hflush() to make sure that data sunk through
 * the pipeline and check the state of the last block's replica.
 * It supposes to be in RBW state
 * @throws IOException in case of an error
 */
@Test public void pipeline_01() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  // Re-open for append and hflush some data so the last block goes back
  // under construction (RBW on the datanodes).
  FSDataOutputStream ofs = fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream) ofs.getWrappedStream()).hflush();
  // List is parameterized (was raw): lb.get(0).getBlock() requires the
  // element type to be LocatedBlock to compile.
  List<LocatedBlock> lb = cluster.getNameNodeRpc()
      .getBlockLocations(filePath.toString(), FILE_SIZE - 1, FILE_SIZE)
      .getLocatedBlocks();
  String bpid = cluster.getNamesystem().getBlockPoolId();
  // Every datanode in the pipeline must hold an RBW replica of that block.
  for (DataNode dn : cluster.getDataNodes()) {
    Replica r = DataNodeTestUtils.fetchReplicaInfo(dn, bpid,
        lb.get(0).getBlock().getBlockId());
    assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
    assertEquals("Should be RBW replica on " + dn
        + " after sequence of calls append()/write()/hflush()",
        HdfsServerConstants.ReplicaState.RBW, r.getState());
  }
  ofs.close();
}
InternalCallVerifier BooleanVerifier
@Test public void testMaxOutHedgedReadPool() throws IOException, InterruptedException, ExecutionException {
isHedgedRead=true;
Configuration conf=new Configuration();
int numHedgedReadPoolThreads=5;
final int initialHedgedReadTimeoutMillis=50000;
final int fixedSleepIntervalMillis=50;
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,numHedgedReadPoolThreads);
conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,initialHedgedReadTimeoutMillis);
DFSClientFaultInjector.instance=Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector=DFSClientFaultInjector.instance;
Mockito.doAnswer(new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(fixedSleepIntervalMillis);
return null;
}
}
).when(injector).startFetchFromDatanode();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
DistributedFileSystem fileSys=cluster.getFileSystem();
DFSClient dfsClient=fileSys.getClient();
DFSHedgedReadMetrics metrics=dfsClient.getHedgedReadMetrics();
metrics.hedgedReadOps.set(0);
metrics.hedgedReadOpsWin.set(0);
metrics.hedgedReadOpsInCurThread.set(0);
try {
Path file1=new Path("hedgedReadMaxOut.dat");
writeFile(fileSys,file1);
pReadFile(fileSys,file1);
assertTrue(metrics.getHedgedReadOps() == 0);
assertTrue(metrics.getHedgedReadOpsInCurThread() == 0);
dfsClient.setHedgedReadTimeout(50);
pReadFile(fileSys,file1);
assertTrue(metrics.getHedgedReadOps() > 0);
assertTrue(metrics.getHedgedReadOpsInCurThread() == 0);
int factor=10;
int numHedgedReads=numHedgedReadPoolThreads * factor;
long initialReadOpsValue=metrics.getHedgedReadOps();
ExecutorService executor=Executors.newFixedThreadPool(numHedgedReads);
ArrayList> futures=new ArrayList>();
for (int i=0; i < numHedgedReads; i++) {
futures.add(executor.submit(getPReadFileCallable(fileSys,file1)));
}
for (int i=0; i < numHedgedReads; i++) {
futures.get(i).get();
}
assertTrue(metrics.getHedgedReadOps() > initialReadOpsValue);
assertTrue(metrics.getHedgedReadOpsInCurThread() > 0);
cleanupFile(fileSys,file1);
executor.shutdown();
}
finally {
fileSys.close();
cluster.shutdown();
Mockito.reset(injector);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Forces the first datanode fetch to throw a ChecksumException (exactly
 * once, guarded by an atomic flag) and delays every datanode read past the
 * hedge threshold, then verifies that the read still succeeds and that the
 * hedged-read loop iterated the expected number of times.
 *
 * Removed dead "if (true)" wrappers, replaced assertTrue(false) with a
 * descriptive fail(), and parameterized the raw Answer instances.
 *
 * @throws IOException if cluster or file operations fail
 */
@Test public void testHedgedReadLoopTooManyTimes() throws IOException {
Configuration conf=new Configuration();
int numHedgedReadPoolThreads=5;
final int hedgedReadTimeoutMillis=50;
conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,numHedgedReadPoolThreads);
conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,hedgedReadTimeoutMillis);
// No retry-window backoff, so the loop count asserted below is deterministic.
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,0);
DFSClientFaultInjector.instance=Mockito.mock(DFSClientFaultInjector.class);
DFSClientFaultInjector injector=DFSClientFaultInjector.instance;
final int sleepMs=100;
// Fetch fault: sleep past the hedge threshold, then throw a
// ChecksumException the first time only (compareAndSet guards the "once").
Mockito.doAnswer(new Answer<Void>(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
if (DFSClientFaultInjector.exceptionNum.compareAndSet(0,1)) {
System.out.println("-------------- throw Checksum Exception");
throw new ChecksumException("ChecksumException test",100);
}
return null;
}
}
).when(injector).fetchFromDatanodeException();
// Every datanode read is additionally delayed so hedging keeps kicking in.
Mockito.doAnswer(new Answer<Void>(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
Thread.sleep(sleepMs * 2);
return null;
}
}
).when(injector).readFromDatanodeDelay();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
DistributedFileSystem fileSys=cluster.getFileSystem();
DFSClient dfsClient=fileSys.getClient();
FSDataOutputStream output=null;
DFSInputStream input=null;
String filename="/hedgedReadMaxOut.dat";
try {
// Create a three-block file (64KB written three times).
Path file=new Path(filename);
output=fileSys.create(file,(short)2);
byte[] data=new byte[64 * 1024];
output.write(data);
output.flush();
output.write(data);
output.flush();
output.write(data);
output.flush();
output.close();
byte[] buffer=new byte[64 * 1024];
input=dfsClient.open(filename);
input.read(0,buffer,0,1024);
input.close();
// The injected fault/delay pattern drives exactly 3 hedged-read loops.
assertEquals(3,input.getHedgedReadOpsLoopNumForTesting());
}
catch ( BlockMissingException e) {
// The injected faults must never surface as a missing block.
fail("read should have succeeded despite injected faults: " + e);
}
finally {
Mockito.reset(injector);
IOUtils.cleanup(null,input);
IOUtils.cleanup(null,output);
fileSys.close();
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test public void testBlockAllocationAdjustsUsageConservatively() throws Exception {
Configuration conf=new HdfsConfiguration();
final int BLOCK_SIZE=6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(conf);
// Build a webhdfs:// client against the same namenode so content summaries
// can be cross-checked over both RPC and WebHDFS.
// NOTE(review): webhdfs is never closed; relies on cluster.shutdown()/JVM
// exit for cleanup - confirm that is acceptable here.
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf);
try {
Path dir=new Path("/test");
Path file1=new Path("/test/test1");
Path file2=new Path("/test/test2");
boolean exceededQuota=false;
// Quota of 3 full blocks; each half-block file is charged a full block
// per replica while being written.
final int QUOTA_SIZE=3 * BLOCK_SIZE;
final int FILE_SIZE=BLOCK_SIZE / 2;
ContentSummary c;
assertTrue(fs.mkdirs(dir));
runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString());
DFSTestUtil.createFile(fs,file1,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file1,(short)3);
c=fs.getContentSummary(dir);
checkContentSummary(c,webhdfs.getContentSummary(dir));
// Once finalized, only the actual half-block x 3 replicas is consumed.
assertEquals("Quota is half consumed",QUOTA_SIZE / 2,c.getSpaceConsumed());
// A second file should trip the quota because allocation conservatively
// charges a full block per replica up front.
try {
DFSTestUtil.createFile(fs,file2,FILE_SIZE,(short)3,1L);
}
catch ( QuotaExceededException e) {
exceededQuota=true;
}
assertTrue("Quota not exceeded",exceededQuota);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test HDFS operations that change disk space consumed by a directory tree.
 * namely create, rename, delete, append, and setReplication.
 * This is based on testNamespaceCommands() above.
 */
@Test public void testSpaceCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,"512");
// Low content-summary limit forces getContentSummary to yield the namesystem
// lock, which the getYieldCount() assertion at the end measures.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
try {
// NOTE(review): assertEquals is consistently called as (actual, expected)
// in this method - swapped from the JUnit convention. Pass/fail behavior
// is unaffected; only failure messages are misleading.
int fileLen=1024;
short replication=3;
// fileSpace = raw disk usage of one file across all replicas.
int fileSpace=fileLen * replication;
// Build the tree and set nested space quotas: qdir1=4x, qdir20=6x, qdir21=2x.
assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
final Path quotaDir1=new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,4 * fileSpace);
ContentSummary c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceQuota(),4 * fileSpace);
final Path quotaDir20=new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,6 * fileSpace);
c=dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceQuota(),6 * fileSpace);
final Path quotaDir21=new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir21));
dfs.setQuota(quotaDir21,HdfsConstants.QUOTA_DONT_SET,2 * fileSpace);
c=dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceQuota(),2 * fileSpace);
// create: one file consumes exactly fileSpace under qdir21.
Path tempPath=new Path(quotaDir21,"nqdir32");
assertTrue(dfs.mkdirs(tempPath));
DFSTestUtil.createFile(dfs,new Path(tempPath,"fileDir/file1"),fileLen,replication,0);
c=dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(),fileSpace);
// create beyond the quota must throw and leave consumption unchanged.
boolean hasException=false;
try {
DFSTestUtil.createFile(dfs,new Path(quotaDir21,"nqdir33/file2"),2 * fileLen,replication,0);
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertTrue(dfs.delete(new Path(quotaDir21,"nqdir33"),true));
c=dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(),fileSpace);
assertEquals(c.getSpaceQuota(),2 * fileSpace);
// rename: consumption moves from qdir21 to qdir20 with the subtree.
c=dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(),0);
Path dstPath=new Path(quotaDir20,"nqdir30");
Path srcPath=new Path(quotaDir21,"nqdir32");
assertTrue(dfs.rename(srcPath,dstPath));
c=dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(),fileSpace);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceConsumed(),fileSpace);
c=dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(),0);
final Path file2=new Path(dstPath,"fileDir/file2");
int file2Len=2 * fileLen;
DFSTestUtil.createFile(dfs,file2,file2Len,replication,0);
c=dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(),3 * fileSpace);
c=dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(),0);
// rename back into qdir21 (quota 2x, needs 3x) must fail and move nothing.
hasException=false;
try {
assertFalse(dfs.rename(dstPath,srcPath));
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertFalse(dfs.exists(srcPath));
assertTrue(dfs.exists(dstPath));
c=dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(),3 * fileSpace);
c=dfs.getContentSummary(quotaDir21);
assertEquals(c.getSpaceConsumed(),0);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getSpaceQuota(),4 * fileSpace);
c=dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(),3 * fileSpace);
// append within quota: consumption grows by the appended bytes' space.
OutputStream out=dfs.append(file2);
out.write(new byte[fileLen]);
out.close();
file2Len+=fileLen;
c=dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(),4 * fileSpace);
// append past the (tightened) quota: write fails but partial data that
// fit before the violation is still accounted.
dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,5 * fileSpace);
out=dfs.append(file2);
hasException=false;
try {
out.write(new byte[fileLen + 1024]);
out.flush();
out.close();
}
catch ( DSQuotaExceededException e) {
hasException=true;
IOUtils.closeStream(out);
}
assertTrue(hasException);
file2Len+=fileLen;
c=dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(),5 * fileSpace);
// setReplication down releases one replica's worth of file2.
dfs.setReplication(file2,(short)(replication - 1));
c=dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(),5 * fileSpace - file2Len);
// setReplication up beyond the quota must fail and change nothing.
hasException=false;
try {
dfs.setReplication(file2,(short)(replication + 1));
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
c=dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(),5 * fileSpace - file2Len);
// With loosened quotas the same setReplication succeeds.
dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace);
dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace);
dfs.setReplication(file2,(short)(replication + 1));
c=dfs.getContentSummary(dstPath);
assertEquals(c.getSpaceConsumed(),5 * fileSpace + file2Len);
// HDFS-2053 regression: sibling dirs where only one has a quota must each
// report their own consumption correctly.
final Path quotaDir2053=new Path("/hdfs-2053");
assertTrue(dfs.mkdirs(quotaDir2053));
final Path quotaDir2053_A=new Path(quotaDir2053,"A");
assertTrue(dfs.mkdirs(quotaDir2053_A));
final Path quotaDir2053_B=new Path(quotaDir2053,"B");
assertTrue(dfs.mkdirs(quotaDir2053_B));
final Path quotaDir2053_C=new Path(quotaDir2053,"C");
assertTrue(dfs.mkdirs(quotaDir2053_C));
int sizeFactorA=1;
int sizeFactorB=2;
int sizeFactorC=4;
dfs.setQuota(quotaDir2053_C,HdfsConstants.QUOTA_DONT_SET,(sizeFactorC + 1) * fileSpace);
c=dfs.getContentSummary(quotaDir2053_C);
assertEquals(c.getSpaceQuota(),(sizeFactorC + 1) * fileSpace);
DFSTestUtil.createFile(dfs,new Path(quotaDir2053_A,"fileA"),sizeFactorA * fileLen,replication,0);
c=dfs.getContentSummary(quotaDir2053_A);
assertEquals(c.getSpaceConsumed(),sizeFactorA * fileSpace);
DFSTestUtil.createFile(dfs,new Path(quotaDir2053_B,"fileB"),sizeFactorB * fileLen,replication,0);
c=dfs.getContentSummary(quotaDir2053_B);
assertEquals(c.getSpaceConsumed(),sizeFactorB * fileSpace);
DFSTestUtil.createFile(dfs,new Path(quotaDir2053_C,"fileC"),sizeFactorC * fileLen,replication,0);
c=dfs.getContentSummary(quotaDir2053_C);
assertEquals(c.getSpaceConsumed(),sizeFactorC * fileSpace);
c=dfs.getContentSummary(quotaDir2053);
assertEquals(c.getSpaceConsumed(),(sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
// The content-summary limit of 2 set above should have forced exactly 20
// lock yields across all the getContentSummary calls.
assertEquals(20,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Like the previous test but create many files. This covers bugs where
 * the quota adjustment is incorrect but it takes many files to accrue
 * a big enough accounting error to violate the quota.
 */
@Test public void testMultipleFilesSmallerThanOneBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
final int BLOCK_SIZE=6 * 1024;
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
// Low content-summary limit so getYieldCount() below is exercised.
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(conf);
// Secondary WebHDFS client used to cross-check content summaries.
final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf);
try {
// Sanity check: no namespace quota is configured by default.
long nsQuota=FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem());
assertTrue("Default namespace quota expected as long max. But the value is :" + nsQuota,nsQuota == Long.MAX_VALUE);
Path dir=new Path("/test");
boolean exceededQuota=false;
ContentSummary c;
// 59 files of 1KB x 3 replicas fit in a 32-block quota; a 60th does not.
final int FILE_SIZE=1024;
final int QUOTA_SIZE=32 * (int)fs.getDefaultBlockSize(dir);
assertEquals(6 * 1024,fs.getDefaultBlockSize(dir));
assertEquals(192 * 1024,QUOTA_SIZE);
assertTrue(fs.mkdirs(dir));
runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString());
for (int i=0; i < 59; i++) {
Path file=new Path("/test/test" + i);
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
c=fs.getContentSummary(dir);
checkContentSummary(c,webhdfs.getContentSummary(dir));
assertEquals("Invalid space consumed",59 * FILE_SIZE * 3,c.getSpaceConsumed());
// Remaining quota equals the unused tail of one block per replica.
assertEquals("Invalid space consumed",QUOTA_SIZE - (59 * FILE_SIZE * 3),3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
// The 60th file is charged a full block per replica and must trip the quota.
try {
Path file=new Path("/test/test59");
DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L);
DFSTestUtil.waitReplication(fs,file,(short)3);
}
catch ( QuotaExceededException e) {
exceededQuota=true;
}
assertTrue("Quota not exceeded",exceededQuota);
// The limited content summaries above should have yielded the lock twice.
assertEquals(2,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test limit cases for setting space quotas: values just below
 * Long.MAX_VALUE are accepted, Long.MAX_VALUE itself acts as the
 * "don't change" sentinel, and anything beyond it (overflowing to a
 * negative long) is rejected with IllegalArgumentException.
 *
 * Replaced assertTrue(msg, a == b) checks with assertEquals so failures
 * report the actual value.
 */
@Test public void testMaxSpaceQuotas() throws Exception {
final Configuration conf=new HdfsConfiguration();
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
final FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
final Path testFolder=new Path("/testFolder");
assertTrue(dfs.mkdirs(testFolder));
// Largest legal namespace quota.
dfs.setQuota(testFolder,Long.MAX_VALUE - 1,10);
ContentSummary c=dfs.getContentSummary(testFolder);
assertEquals("Quota not set properly",Long.MAX_VALUE - 1,c.getQuota());
// Largest legal space quota.
dfs.setQuota(testFolder,10,Long.MAX_VALUE - 1);
c=dfs.getContentSummary(testFolder);
assertEquals("Quota not set properly",Long.MAX_VALUE - 1,c.getSpaceQuota());
// Long.MAX_VALUE means "leave unchanged": the values set above persist.
dfs.setQuota(testFolder,Long.MAX_VALUE,10);
c=dfs.getContentSummary(testFolder);
assertEquals("Quota should not have changed",10,c.getQuota());
dfs.setQuota(testFolder,10,Long.MAX_VALUE);
c=dfs.getContentSummary(testFolder);
assertEquals("Quota should not have changed",10,c.getSpaceQuota());
// Long.MAX_VALUE + 1 overflows to a negative value and must be rejected.
try {
dfs.setQuota(testFolder,Long.MAX_VALUE + 1,10);
fail("Exception not thrown");
}
catch ( IllegalArgumentException e) {
// expected: namespace quota out of range
}
try {
dfs.setQuota(testFolder,10,Long.MAX_VALUE + 1);
fail("Exception not thrown");
}
catch ( IllegalArgumentException e) {
// expected: space quota out of range
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test commands that change the size of the name space:
 * mkdirs, rename, and delete
 */
@Test public void testNamespaceCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
// Low content-summary limit forces lock yields, checked via getYieldCount().
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final DistributedFileSystem dfs=cluster.getFileSystem();
try {
// NOTE(review): assertEquals arguments are (actual, expected) throughout,
// swapped from the JUnit convention; pass/fail is unaffected.
// Build the tree and set nested namespace quotas: qdir1=6, qdir20=7, qdir21=2.
assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
final Path quotaDir1=new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1,6,HdfsConstants.QUOTA_DONT_SET);
ContentSummary c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),6);
final Path quotaDir2=new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir2,7,HdfsConstants.QUOTA_DONT_SET);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),7);
final Path quotaDir3=new Path("/nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir3));
dfs.setQuota(quotaDir3,2,HdfsConstants.QUOTA_DONT_SET);
c=dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(),1);
assertEquals(c.getQuota(),2);
// mkdirs up to the quota succeeds; one more must throw and change nothing.
Path tempPath=new Path(quotaDir3,"nqdir32");
assertTrue(dfs.mkdirs(tempPath));
c=dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),2);
tempPath=new Path(quotaDir3,"nqdir33");
boolean hasException=false;
try {
assertFalse(dfs.mkdirs(tempPath));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
c=dfs.getContentSummary(quotaDir3);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),2);
// qdir20 still has headroom for one more dir...
tempPath=new Path(quotaDir2,"nqdir31");
assertTrue(dfs.mkdirs(tempPath));
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),6);
assertEquals(c.getQuota(),6);
// ...but the ancestor qdir1 is now full, so another mkdirs fails.
tempPath=new Path(quotaDir2,"nqdir33");
hasException=false;
try {
assertFalse(dfs.mkdirs(tempPath));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// rename within the quota'd subtree: counts move with the directory.
tempPath=new Path(quotaDir2,"nqdir30");
dfs.rename(new Path(quotaDir3,"nqdir32"),tempPath);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),4);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),6);
assertEquals(c.getQuota(),6);
// rename into a full directory must fail and leave both paths untouched.
hasException=false;
try {
assertFalse(dfs.rename(tempPath,quotaDir3));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertTrue(dfs.exists(tempPath));
assertFalse(dfs.exists(new Path(quotaDir3,"nqdir30")));
hasException=false;
try {
assertFalse(dfs.rename(tempPath,new Path(quotaDir3,"nqdir32")));
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
assertTrue(dfs.exists(tempPath));
assertFalse(dfs.exists(new Path(quotaDir3,"nqdir32")));
// rename out of the quota'd tree releases namespace under qdir1/qdir20.
assertTrue(dfs.rename(tempPath,new Path("/nqdir0")));
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),4);
assertEquals(c.getQuota(),6);
// renaming a 2-dir subtree into a dir with only 1 slot left must fail.
assertTrue(dfs.mkdirs(new Path("/nqdir0/nqdir30/nqdir33")));
hasException=false;
try {
assertFalse(dfs.rename(new Path("/nqdir0/nqdir30"),tempPath));
}
catch ( NSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// Moving a directory that itself carries a quota keeps that quota intact.
assertTrue(dfs.rename(quotaDir3,quotaDir2));
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),4);
assertEquals(c.getQuota(),6);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),7);
tempPath=new Path(quotaDir2,"qdir21");
c=dfs.getContentSummary(tempPath);
assertEquals(c.getDirectoryCount(),1);
assertEquals(c.getQuota(),2);
// delete releases the namespace it occupied.
dfs.delete(tempPath,true);
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),2);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),3);
assertEquals(c.getQuota(),6);
// rename a 3-dir subtree back in; counts propagate to every ancestor.
assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"),quotaDir2));
c=dfs.getContentSummary(quotaDir2);
assertEquals(c.getDirectoryCount(),5);
assertEquals(c.getQuota(),7);
c=dfs.getContentSummary(quotaDir1);
assertEquals(c.getDirectoryCount(),6);
assertEquals(c.getQuota(),6);
// The limit of 2 above should have produced exactly 14 lock yields.
assertEquals(14,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test quota related commands:
 * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
 */
@Test public void testQuotaCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
final int DEFAULT_BLOCK_SIZE=512;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE);
// Low content-summary limit forces lock yields, checked via getYieldCount().
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs=cluster.getFileSystem();
assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs=(DistributedFileSystem)fs;
DFSAdmin admin=new DFSAdmin(conf);
try {
// NOTE(review): assertEquals arguments are (actual, expected) throughout,
// swapped from the JUnit convention; pass/fail is unaffected.
final int fileLen=1024;
final short replication=5;
// Space quota sized so one file fits but a second write cannot.
final long spaceQuota=fileLen * replication * 15 / 8;
// 1: set a 3-item namespace quota and a space quota; "2t" exercises
// the size-suffix parsing (2 terabytes).
final Path parent=new Path("/test");
assertTrue(dfs.mkdirs(parent));
String[] args=new String[]{"-setQuota","3",parent.toString()};
runCommand(admin,args,false);
runCommand(admin,false,"-setSpaceQuota","2t",parent.toString());
assertEquals(2L << 40,dfs.getContentSummary(parent).getSpaceQuota());
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota),parent.toString());
// 2: create a directory and a file; verify both quotas are reported.
final Path childDir0=new Path(parent,"data0");
assertTrue(dfs.mkdirs(childDir0));
final Path childFile0=new Path(parent,"datafile0");
DFSTestUtil.createFile(fs,childFile0,fileLen,replication,0);
ContentSummary c=dfs.getContentSummary(parent);
assertEquals(c.getFileCount() + c.getDirectoryCount(),3);
assertEquals(c.getQuota(),3);
assertEquals(c.getSpaceConsumed(),fileLen * replication);
assertEquals(c.getSpaceQuota(),spaceQuota);
c=dfs.getContentSummary(childDir0);
assertEquals(c.getFileCount() + c.getDirectoryCount(),1);
assertEquals(c.getQuota(),-1);
c=dfs.getContentSummary(parent);
assertEquals(c.getSpaceConsumed(),fileLen * replication);
// 3: namespace quota is full - mkdirs and create must both fail.
final Path childDir1=new Path(parent,"data1");
boolean hasException=false;
try {
assertFalse(dfs.mkdirs(childDir1));
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
OutputStream fout;
final Path childFile1=new Path(parent,"datafile1");
hasException=false;
try {
fout=dfs.create(childFile1);
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 4: clrQuota removes the namespace quota but keeps the space quota.
runCommand(admin,new String[]{"-clrQuota",parent.toString()},false);
c=dfs.getContentSummary(parent);
assertEquals(c.getQuota(),-1);
assertEquals(c.getSpaceQuota(),spaceQuota);
// clrQuota on a dir without a quota is a harmless no-op.
runCommand(admin,new String[]{"-clrQuota",childDir0.toString()},false);
c=dfs.getContentSummary(childDir0);
assertEquals(c.getQuota(),-1);
// 5: writing past the space quota fails at write/close time.
fout=dfs.create(childFile1,replication);
try {
fout.write(new byte[fileLen]);
fout.close();
Assert.fail();
}
catch ( QuotaExceededException e) {
IOUtils.closeStream(fout);
}
dfs.delete(childFile1,false);
// 6: clrSpaceQuota removes the space quota; the file then fits.
runCommand(admin,false,"-clrSpaceQuota",parent.toString());
c=dfs.getContentSummary(parent);
assertEquals(c.getQuota(),-1);
assertEquals(c.getSpaceQuota(),-1);
DFSTestUtil.createFile(dfs,childFile1,fileLen,replication,0);
// 7: setting quotas below current usage is allowed; later ops then fail.
args=new String[]{"-setQuota","1",parent.toString()};
runCommand(admin,args,false);
runCommand(admin,false,"-setSpaceQuota",Integer.toString(fileLen),args[2]);
args=new String[]{"-setQuota","1",childDir0.toString()};
runCommand(admin,args,false);
hasException=false;
try {
assertFalse(dfs.mkdirs(new Path(childDir0,"in")));
}
catch ( QuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
c=dfs.getContentSummary(childDir0);
assertEquals(c.getDirectoryCount() + c.getFileCount(),1);
assertEquals(c.getQuota(),1);
// 8/9: quota commands on non-existent paths and on plain files must fail
// (the expected-failure flag is true for these runCommand calls).
Path nonExistentPath=new Path("/test1");
assertFalse(dfs.exists(nonExistentPath));
args=new String[]{"-setQuota","1",nonExistentPath.toString()};
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota","1g",nonExistentPath.toString());
assertTrue(dfs.isFile(childFile0));
args[1]=childFile0.toString();
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota","1t",args[1]);
args[0]="-clrQuota";
runCommand(admin,args,true);
runCommand(admin,true,"-clrSpaceQuota",args[1]);
args[1]=nonExistentPath.toString();
runCommand(admin,args,true);
runCommand(admin,true,"-clrSpaceQuota",args[1]);
// 10-12: invalid quota values (0, -1, > Long.MAX_VALUE, malformed) fail.
args=new String[]{"-setQuota","0",parent.toString()};
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota","0",args[2]);
args[1]="-1";
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota",args[1],args[2]);
args[1]=String.valueOf(Long.MAX_VALUE + 1L);
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota",args[1],args[2]);
args[1]="33aa1.5";
runCommand(admin,args,true);
runCommand(admin,true,"-setSpaceQuota",args[1],args[2]);
// A suffixed value that overflows long must also be rejected.
runCommand(admin,true,"-setSpaceQuota",(Long.MAX_VALUE / 1024 / 1024 + 1024) + "m",args[2]);
// 13: a non-superuser may not set or clear quotas.
final String username="userxx";
UserGroupInformation ugi=UserGroupInformation.createUserForTesting(username,new String[]{"groupyy"});
final String[] args2=args.clone();
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
assertEquals("Not running as new user",username,UserGroupInformation.getCurrentUser().getShortUserName());
DFSAdmin userAdmin=new DFSAdmin(conf);
args2[1]="100";
runCommand(userAdmin,args2,true);
runCommand(userAdmin,true,"-setSpaceQuota","1g",args2[2]);
String[] args3=new String[]{"-clrQuota",parent.toString()};
runCommand(userAdmin,args3,true);
runCommand(userAdmin,true,"-clrSpaceQuota",args3[1]);
return null;
}
}
);
// 14: quota operations on the root directory. clrQuota on "/" is expected
// to fail; setQuota and clrSpaceQuota succeed.
runCommand(admin,true,"-clrQuota","/");
runCommand(admin,false,"-setQuota","1000000","/");
runCommand(admin,true,"-clrQuota","/");
runCommand(admin,false,"-clrSpaceQuota","/");
runCommand(admin,new String[]{"-clrQuota",parent.toString()},false);
runCommand(admin,false,"-clrSpaceQuota",parent.toString());
// 15/16: set/clear cycle of space quota on a subdirectory, then exceed it.
final Path childDir2=new Path(parent,"data2");
assertTrue(dfs.mkdirs(childDir2));
final Path childFile2=new Path(childDir2,"datafile2");
final Path childFile3=new Path(childDir2,"datafile3");
final long spaceQuota2=DEFAULT_BLOCK_SIZE * replication;
final long fileLen2=DEFAULT_BLOCK_SIZE;
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString());
runCommand(admin,false,"-clrSpaceQuota",childDir2.toString());
DFSTestUtil.createFile(fs,childFile2,fileLen2,replication,0);
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString());
hasException=false;
try {
DFSTestUtil.createFile(fs,childFile3,fileLen2,replication,0);
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// 17/18: the same set/clear/exceed cycle on the root directory.
final Path childFile4=new Path("/","datafile2");
final Path childFile5=new Path("/","datafile3");
runCommand(admin,true,"-clrQuota","/");
runCommand(admin,false,"-clrSpaceQuota","/");
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/");
runCommand(admin,false,"-clrSpaceQuota","/");
DFSTestUtil.createFile(fs,childFile4,fileLen2,replication,0);
runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/");
hasException=false;
try {
DFSTestUtil.createFile(fs,childFile5,fileLen2,replication,0);
}
catch ( DSQuotaExceededException e) {
hasException=true;
}
assertTrue(hasException);
// The limited content summaries above should have yielded the lock 4 times.
assertEquals(4,cluster.getNamesystem().getFSDirectory().getYieldCount());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1
 * mkdir /user/dir2
 * move /user/dir1/file1 /user/dir2/
 *
 * Renames a file that is still open for write into an existing directory,
 * restarts the cluster twice (forcing edit-log replay), and verifies the
 * file exists only under its new path with the expected contents.
 *
 * Fixed: guard fs != null in the finally block, so a failure before
 * getFileSystem() no longer triggers a NullPointerException that masks
 * the original test failure.
 *
 * @throws IOException if any cluster or filesystem operation fails
 */
@Test public void testWhileOpenRenameToExistentDirectory() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,1);
System.out.println("Test 3************************************");
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=null;
try {
cluster.waitActive();
fs=cluster.getFileSystem();
final int nnport=cluster.getNameNodePort();
Path dir1=new Path("/user/dir1");
Path file1=new Path(dir1,"file1");
FSDataOutputStream stm1=TestFileCreation.createFile(fs,file1,1);
System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
TestFileCreation.writeFile(stm1);
// hflush so the data is in the pipeline while the file stays open.
stm1.hflush();
Path dir2=new Path("/user/dir2");
fs.mkdirs(dir2);
// Rename the still-open file into the existing directory.
fs.rename(file1,dir2);
cluster.shutdown();
try {
// Let cached IPC connections expire before the restart.
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
cluster.shutdown();
try {
Thread.sleep(5000);
}
catch ( InterruptedException e) {
}
// Second restart: the namespace must still be consistent after replay.
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path newfile=new Path("/user/dir2","file1");
assertTrue(!fs.exists(file1));
assertTrue(fs.exists(newfile));
checkFullFile(fs,newfile);
}
finally {
// fs is null if waitActive()/getFileSystem() threw; don't let an NPE
// here hide the original exception.
if (fs != null) {
fs.close();
}
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1 /user/dir2/file2
 * mkdir /user/dir3
 * move /user/dir1 /user/dir3
 *
 * Renames the parent directory of an open file while the edit log segment is
 * prevented from being finalized, then restarts the NameNode twice without
 * formatting to verify recovery of the renamed tree from in-progress edits.
 */
@Test public void testWhileOpenRenameParent() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final int MAX_IDLE_TIME = 2000; // 2s: let IPC client connections go idle
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TestFileCreation.blockSize);
  System.out.println("Test 1*****************************");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // Stub out endCurrentLogSegment so shutdown leaves the edit log segment
    // un-finalized; the restarts below must recover from in-progress edits.
    FSEditLog spyLog = spy(cluster.getNameNode().getFSImage().getEditLog());
    doNothing().when(spyLog).endCurrentLogSegment(Mockito.anyBoolean());
    cluster.getNameNode().getFSImage().setEditLogForTesting(spyLog);
    final int nnport = cluster.getNameNodePort();
    // NOTE(review): the '+' in the path looks intended to exercise special
    // characters in edit-log path encoding — confirm.
    Path dir1 = new Path("/user/a+b/dir1");
    Path file1 = new Path(dir1, "file1");
    FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    Path dir2 = new Path("/user/dir2");
    Path file2 = new Path(dir2, "file2");
    FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2);
    stm2.hflush();
    // Move dir1 (containing the open file1) under dir3.
    Path dir3 = new Path("/user/dir3");
    fs.mkdirs(dir3);
    fs.rename(dir1, dir3);
    // Also create and rename an open file spanning more than one block.
    Path file3 = new Path(dir3, "file3");
    FSDataOutputStream stm3 = fs.create(file3);
    fs.rename(file3, new Path(dir3, "bozo"));
    TestFileCreation.writeFile(stm3, TestFileCreation.blockSize + 1);
    stm3.hflush();
    // Stop abruptly (segment left in-progress thanks to the spy above).
    cluster.getNameNode().stop();
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
      // best-effort wait; continuing early is harmless for this test
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      // best-effort wait; continuing early is harmless for this test
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // file1 must only exist at its post-rename location; file2 untouched.
    Path newfile = new Path("/user/dir3/dir1", "file1");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(file2));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs, newfile);
  } finally {
    // Fix: fs is still null if the cluster failed to come up; the previous
    // unconditional fs.close() masked the original failure with an NPE.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1 /user/dir2/file2
 * move /user/dir1 /user/dir3
 *
 * Renames the parent directory of an open file to a nonexistent target,
 * then restarts the NameNode twice without formatting and verifies the
 * rename and file contents are recovered.
 */
@Test public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final int MAX_IDLE_TIME = 2000; // 2s: let IPC client connections go idle
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
  System.out.println("Test 2************************************");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final int nnport = cluster.getNameNodePort();
    // Two open files in two directories; only dir1 gets renamed.
    Path dir1 = new Path("/user/dir1");
    Path file1 = new Path(dir1, "file1");
    FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    Path dir2 = new Path("/user/dir2");
    Path file2 = new Path(dir2, "file2");
    FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2);
    stm2.hflush();
    // dir3 does not exist: rename replaces dir1 as /user/dir3.
    Path dir3 = new Path("/user/dir3");
    fs.rename(dir1, dir3);
    // Restart #1: same port, no format, so the edit log is replayed.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
      // best-effort wait; continuing early is harmless for this test
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    // Restart #2: exercise recovery a second time.
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      // best-effort wait; continuing early is harmless for this test
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path newfile = new Path("/user/dir3", "file1");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(file2));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs, newfile);
  } finally {
    // Fix: fs is still null if the cluster failed to come up; the previous
    // unconditional fs.close() masked the original failure with an NPE.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * open /user/dir1/file1
 * move /user/dir1/file1 /user/dir2/
 *
 * Renames a still-open file to a nonexistent target path (so the file itself
 * becomes /user/dir2), then restarts the NameNode twice without formatting
 * and verifies recovery.
 */
@Test public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final int MAX_IDLE_TIME = 2000; // 2s: let IPC client connections go idle
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, 1);
  System.out.println("Test 4************************************");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final int nnport = cluster.getNameNodePort();
    // Create /user/dir1/file1 and keep its output stream open.
    Path dir1 = new Path("/user/dir1");
    Path file1 = new Path(dir1, "file1");
    FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1);
    stm1.hflush();
    // /user/dir2 does not exist, so the open file is renamed TO that path.
    Path dir2 = new Path("/user/dir2");
    fs.rename(file1, dir2);
    // Restart #1: same port, no format, so the edit log is replayed.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
      // best-effort wait; continuing early is harmless for this test
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    // Restart #2: exercise recovery a second time.
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      // best-effort wait; continuing early is harmless for this test
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path newfile = new Path("/user", "dir2");
    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(newfile));
    checkFullFile(fs, newfile);
  } finally {
    // Fix: fs is still null if the cluster failed to come up; the previous
    // unconditional fs.close() masked the original failure with an NPE.
    if (fs != null) {
      fs.close();
    }
    cluster.shutdown();
  }
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Test DEFAULT ReplaceDatanodeOnFailure policy.
*/
@Test public void testDefaultPolicy() throws Exception {
final ReplaceDatanodeOnFailure p=ReplaceDatanodeOnFailure.DEFAULT;
final DatanodeInfo[] infos=new DatanodeInfo[5];
final DatanodeInfo[][] datanodes=new DatanodeInfo[infos.length + 1][];
datanodes[0]=new DatanodeInfo[0];
for (int i=0; i < infos.length; ) {
infos[i]=DFSTestUtil.getLocalDatanodeInfo(50020 + i);
i++;
datanodes[i]=new DatanodeInfo[i];
System.arraycopy(infos,0,datanodes[i],0,datanodes[i].length);
}
final boolean[] isAppend={true,true,false,false};
final boolean[] isHflushed={true,false,true,false};
for (short replication=1; replication <= infos.length; replication++) {
for (int nExistings=0; nExistings < datanodes.length; nExistings++) {
final DatanodeInfo[] existings=datanodes[nExistings];
Assert.assertEquals(nExistings,existings.length);
for (int i=0; i < isAppend.length; i++) {
for (int j=0; j < isHflushed.length; j++) {
final int half=replication / 2;
final boolean enoughReplica=replication <= nExistings;
final boolean noReplica=nExistings == 0;
final boolean replicationL3=replication < 3;
final boolean existingsLEhalf=nExistings <= half;
final boolean isAH=isAppend[i] || isHflushed[j];
final boolean expected;
if (enoughReplica || noReplica || replicationL3) {
expected=false;
}
else {
expected=isAH || existingsLEhalf;
}
final boolean computed=p.satisfy(replication,existings,isAppend[i],isHflushed[j]);
try {
Assert.assertEquals(expected,computed);
}
catch ( AssertionError e) {
final String s="replication=" + replication + "\nnExistings ="+ nExistings+ "\nisAppend ="+ isAppend[i]+ "\nisHflushed ="+ isHflushed[j];
throw new RuntimeException(s,e);
}
}
}
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// With replication factor 3 but only 1 datanode, an append to an empty file
// succeeds, while a second append is expected to fail with an IOException.
@Test public void testAppend() throws Exception {
final Configuration conf=new HdfsConfiguration();
final short REPLICATION=(short)3;
// An unconfigured conf must yield the DEFAULT replace-datanode policy.
Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT,ReplaceDatanodeOnFailure.get(conf));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
final DistributedFileSystem fs=cluster.getFileSystem();
final Path f=new Path(DIR,"testAppend");
{
// Create an empty file with the (unsatisfiable) replication factor.
LOG.info("create an empty file " + f);
fs.create(f,REPLICATION).close();
final FileStatus status=fs.getFileStatus(f);
Assert.assertEquals(REPLICATION,status.getReplication());
Assert.assertEquals(0L,status.getLen());
}
final byte[] bytes=new byte[1000];
{
// First append: must succeed and grow the file to bytes.length.
LOG.info("append " + bytes.length + " bytes to "+ f);
final FSDataOutputStream out=fs.append(f);
out.write(bytes);
out.close();
final FileStatus status=fs.getFileStatus(f);
Assert.assertEquals(REPLICATION,status.getReplication());
Assert.assertEquals(bytes.length,status.getLen());
}
{
// Second append: expected to fail with an IOException.
LOG.info("append another " + bytes.length + " bytes to "+ f);
try {
final FSDataOutputStream out=fs.append(f);
out.write(bytes);
out.close();
Assert.fail();
}
catch ( IOException ioe) {
LOG.info("This exception is expected",ioe);
}
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Test replace datanode on failure.
*/
@Test public void testReplaceDatanodeOnFailure() throws Exception {
final Configuration conf=new HdfsConfiguration();
ReplaceDatanodeOnFailure.ALWAYS.write(conf);
final String[] racks=new String[REPLICATION];
Arrays.fill(racks,RACK0);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).racks(racks).numDataNodes(REPLICATION).build();
try {
final DistributedFileSystem fs=cluster.getFileSystem();
final Path dir=new Path(DIR);
final SlowWriter[] slowwriters=new SlowWriter[10];
for (int i=1; i <= slowwriters.length; i++) {
slowwriters[i - 1]=new SlowWriter(fs,new Path(dir,"file" + i),i * 200L);
}
for ( SlowWriter s : slowwriters) {
s.start();
}
sleepSeconds(1);
cluster.startDataNodes(conf,2,true,null,new String[]{RACK1,RACK1});
cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
sleepSeconds(5);
for ( SlowWriter s : slowwriters) {
s.checkReplication();
s.interruptRunning();
}
for ( SlowWriter s : slowwriters) {
s.joinAndClose();
}
LOG.info("Verify the file");
for (int i=0; i < slowwriters.length; i++) {
LOG.info(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen());
FSDataInputStream in=null;
try {
in=fs.open(slowwriters[i].filepath);
for (int j=0, x; (x=in.read()) != -1; j++) {
Assert.assertEquals(j,x);
}
}
finally {
IOUtils.closeStream(in);
}
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testPendingReplicationRetry() throws IOException {
MiniDFSCluster cluster=null;
int numDataNodes=4;
String testFile="/replication-test-file";
Path testPath=new Path(testFile);
byte buffer[]=new byte[1024];
for (int i=0; i < buffer.length; i++) {
buffer[i]='1';
}
try {
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes));
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
OutputStream out=cluster.getFileSystem().create(testPath);
out.write(buffer);
out.close();
waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1);
ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(testFile,0,Long.MAX_VALUE).get(0).getBlock();
cluster.shutdown();
cluster=null;
for (int i=0; i < 25; i++) {
buffer[i]='0';
}
int fileCount=0;
for (int dnIndex=0; dnIndex < 3; dnIndex++) {
File blockFile=MiniDFSCluster.getBlockFile(dnIndex,block);
LOG.info("Checking for file " + blockFile);
if (blockFile != null && blockFile.exists()) {
if (fileCount == 0) {
LOG.info("Deleting file " + blockFile);
assertTrue(blockFile.delete());
}
else {
LOG.info("Corrupting file " + blockFile);
long len=blockFile.length();
assertTrue(len > 50);
RandomAccessFile blockOut=new RandomAccessFile(blockFile,"rw");
try {
blockOut.seek(len / 3);
blockOut.write(buffer,0,25);
}
finally {
blockOut.close();
}
}
fileCount++;
}
}
assertEquals(3,fileCount);
LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes));
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
conf.set("dfs.datanode.block.write.timeout.sec",Integer.toString(5));
conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.75f");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build();
cluster.waitActive();
dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testBadBlockReportOnTransfer() throws Exception {
Configuration conf=new HdfsConfiguration();
FileSystem fs=null;
DFSClient dfsClient=null;
LocatedBlocks blocks=null;
int replicaCount=0;
short replFactor=1;
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
fs=cluster.getFileSystem();
dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
Path file1=new Path("/tmp/testBadBlockReportOnTransfer/file1");
DFSTestUtil.createFile(fs,file1,1024,replFactor,0);
DFSTestUtil.waitReplication(fs,file1,replFactor);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block);
assertEquals("Corrupted too few blocks",replFactor,blockFilesCorrupted);
replFactor=2;
fs.setReplication(file1,replFactor);
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
while (blocks.get(0).isCorrupt() != true) {
try {
LOG.info("Waiting until block is marked as corrupt...");
Thread.sleep(1000);
}
catch ( InterruptedException ie) {
}
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
}
replicaCount=blocks.get(0).getLocations().length;
assertTrue(replicaCount == 1);
cluster.shutdown();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test(timeout=120000) public void testListDotReserved() throws Exception {
final Path baseFileRaw=new Path("/.reserved/raw/base");
final int len=8192;
DFSTestUtil.createFile(fs,baseFileRaw,len,(short)1,0xFEED);
try {
fs.listStatus(new Path("/.reserved"));
fail("expected FNFE");
}
catch ( FileNotFoundException e) {
assertExceptionContains("/.reserved does not exist",e);
}
try {
fs.listStatus(new Path("/.reserved/.inodes"));
fail("expected FNFE");
}
catch ( FileNotFoundException e) {
assertExceptionContains("/.reserved/.inodes does not exist",e);
}
final FileStatus[] fileStatuses=fs.listStatus(new Path("/.reserved/raw"));
assertEquals("expected 1 entry",fileStatuses.length,1);
assertMatches(fileStatuses[0].getPath().toString(),"/.reserved/raw/base");
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=300000) public void testCheckpoint() throws IOException, InterruptedException {
final Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,1);
MiniQJMHACluster cluster=null;
final Path foo=new Path("/foo");
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
queryForPreparation(dfs);
dfs.mkdirs(foo);
long txid=dfs.rollEdits();
Assert.assertTrue(txid > 0);
int retries=0;
while (++retries < 5) {
NNStorage storage=dfsCluster.getNamesystem(1).getFSImage().getStorage();
if (storage.getFsImageName(txid - 1) != null) {
return;
}
Thread.sleep(1000);
}
Assert.fail("new checkpoint does not exist");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
@Test(timeout=300000) public void testQuery() throws Exception {
final Configuration conf=new Configuration();
MiniQJMHACluster cluster=null;
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
dfsCluster.shutdownNameNode(1);
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
info=dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
Assert.assertFalse(info.createdRollbackImages());
dfsCluster.restartNameNode(1);
queryForPreparation(dfs);
Assert.assertTrue(dfsCluster.getNamesystem(0).getFSImage().hasRollbackFSImage());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
* Test DFSAdmin Upgrade Command.
*/
@Test public void testDFSAdminRollingUpgradeCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
final Path baz=new Path("/baz");
{
final DistributedFileSystem dfs=cluster.getFileSystem();
final DFSAdmin dfsadmin=new DFSAdmin(conf);
dfs.mkdirs(foo);
runCmd(dfsadmin,false,"-rollingUpgrade","abc");
runCmd(dfsadmin,true,"-rollingUpgrade");
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
runCmd(dfsadmin,true,"-rollingUpgrade","prepare");
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
runCmd(dfsadmin,true,"-rollingUpgrade","query");
dfs.mkdirs(bar);
runCmd(dfsadmin,true,"-rollingUpgrade","finalize");
dfs.mkdirs(baz);
runCmd(dfsadmin,true,"-rollingUpgrade");
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
Assert.assertTrue(dfs.exists(baz));
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
{
final DistributedFileSystem dfs=cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
Assert.assertTrue(dfs.exists(baz));
}
}
finally {
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testRollingUpgradeWithQJM() throws Exception {
String nnDirPrefix=MiniDFSCluster.getBaseDirectory() + "/nn/";
final File nn1Dir=new File(nnDirPrefix + "image1");
final File nn2Dir=new File(nnDirPrefix + "image2");
LOG.info("nn1Dir=" + nn1Dir);
LOG.info("nn2Dir=" + nn2Dir);
final Configuration conf=new HdfsConfiguration();
final MiniJournalCluster mjc=new MiniJournalCluster.Builder(conf).build();
setConf(conf,nn1Dir,mjc);
{
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
cluster.shutdown();
}
MiniDFSCluster cluster2=null;
try {
FileUtil.fullyDelete(nn2Dir);
FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf);
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
final Path baz=new Path("/baz");
final RollingUpgradeInfo info1;
{
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.mkdirs(foo);
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
info1=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
LOG.info("START\n" + info1);
Assert.assertEquals(info1,dfs.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs.mkdirs(bar);
cluster.shutdown();
}
final Configuration conf2=setConf(new Configuration(),nn2Dir,mjc);
cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build();
final DistributedFileSystem dfs2=cluster2.getFileSystem();
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertFalse(dfs2.exists(baz));
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
dfs2.mkdirs(baz);
LOG.info("RESTART cluster 2");
cluster2.restartNameNode();
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
try {
cluster2.restartNameNode("-upgrade");
}
catch ( IOException e) {
LOG.info("The exception is expected.",e);
}
LOG.info("RESTART cluster 2 again");
cluster2.restartNameNode();
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY));
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
final RollingUpgradeInfo finalize=dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE);
LOG.info("FINALIZE: " + finalize);
Assert.assertEquals(info1.getStartTime(),finalize.getStartTime());
LOG.info("RESTART cluster 2 with regular startup option");
cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster2.restartNameNode();
Assert.assertTrue(dfs2.exists(foo));
Assert.assertTrue(dfs2.exists(bar));
Assert.assertTrue(dfs2.exists(baz));
}
finally {
if (cluster2 != null) cluster2.shutdown();
}
}
InternalCallVerifier BooleanVerifier
@Test(timeout=300000) public void testFinalize() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniQJMHACluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
FSImage fsimage=dfsCluster.getNamesystem(0).getFSImage();
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
queryForPreparation(dfs);
Assert.assertTrue(fsimage.hasRollbackFSImage());
info=dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
Assert.assertTrue(info.isFinalized());
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(fsimage.hasRollbackFSImage());
dfsCluster.restartNameNode(0);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DFSAdmin dfsadmin=new DFSAdmin(conf);
DataNode dn=cluster.getDataNodes().get(0);
final String dnAddr=dn.getDatanodeId().getIpcAddr(false);
final String[] args1={"-getDatanodeInfo",dnAddr};
Assert.assertEquals(0,dfsadmin.run(args1));
final String[] args2={"-shutdownDatanode",dnAddr,"upgrade"};
Assert.assertEquals(0,dfsadmin.run(args2));
Thread.sleep(2000);
Assert.assertFalse("DataNode should exit",dn.isDatanodeUp());
Assert.assertEquals(-1,dfsadmin.run(args1));
}
finally {
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
@Test(timeout=300000) public void testDowngrade() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniQJMHACluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
TestRollingUpgrade.queryForPreparation(dfs);
dfs.close();
dfsCluster.restartNameNode(0,true,"-rollingUpgrade","downgrade");
Assert.assertFalse(dfsCluster.getNamesystem(0).getFSImage().hasRollbackFSImage());
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
dfs=dfsCluster.getFileSystem(0);
Assert.assertTrue(dfs.exists(foo));
Assert.assertTrue(dfs.exists(bar));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Runs "-rollingUpgrade prepare" via DFSAdmin, creates /bar afterwards,
// then starts a NameNode with "-rollingUpgrade rollback": /foo (pre-upgrade)
// must survive, /bar (during upgrade) must be rolled back, and NNStorage
// must match the expected image/edits state (see checkNNStorage).
@Test public void testRollbackCommand() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final DFSAdmin dfsadmin=new DFSAdmin(conf);
dfs.mkdirs(foo);
// PREPARE in safe mode via the admin command; 0 means success.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"}));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// bar is created during the upgrade window, so rollback must undo it.
dfs.mkdirs(bar);
NNStorage storage=cluster.getNamesystem().getFSImage().getStorage();
checkNNStorage(storage,3,-1);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
// Start a fresh NameNode with the rollback startup option and verify
// the namespace state directly through its FSDirectory.
NameNode nn=null;
try {
nn=NameNode.createNameNode(new String[]{"-rollingUpgrade","rollback"},conf);
INode fooNode=nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString());
Assert.assertNotNull(fooNode);
INode barNode=nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString());
Assert.assertNull(barNode);
NNStorage storage=nn.getNamesystem().getFSImage().getStorage();
checkNNStorage(storage,3,7);
}
finally {
if (nn != null) {
nn.stop();
nn.join();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRollbackWithQJM() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniJournalCluster mjc=null;
MiniDFSCluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
mjc=new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI(JOURNAL_ID).toString());
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
DistributedFileSystem dfs=cluster.getFileSystem();
final DFSAdmin dfsadmin=new DFSAdmin(conf);
dfs.mkdirs(foo);
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"}));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
dfs.mkdirs(bar);
dfs.close();
cluster.restartNameNode("-rollingUpgrade","rollback");
dfs=cluster.getFileSystem();
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
for (int i=0; i < NUM_JOURNAL_NODES; i++) {
File dir=mjc.getCurrentDir(0,JOURNAL_ID);
checkJNStorage(dir,4,7);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
if (mjc != null) {
mjc.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
* Test rollback scenarios where StandbyNameNode does checkpoints during
* rolling upgrade.
*/
@Test public void testRollbackWithHAQJM() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniQJMHACluster cluster=null;
final Path foo=new Path("/foo");
final Path bar=new Path("/bar");
try {
cluster=new MiniQJMHACluster.Builder(conf).build();
MiniDFSCluster dfsCluster=cluster.getDfsCluster();
dfsCluster.waitActive();
dfsCluster.getConfiguration(1).setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
dfsCluster.restartNameNode(1);
dfsCluster.transitionToActive(0);
DistributedFileSystem dfs=dfsCluster.getFileSystem(0);
dfs.mkdirs(foo);
RollingUpgradeInfo info=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
Assert.assertTrue(info.isStarted());
dfs.mkdirs(bar);
dfs.close();
TestRollingUpgrade.queryForPreparation(dfs);
Assert.assertTrue(dfsCluster.getNameNode(0).getFSImage().hasRollbackFSImage());
Assert.assertTrue(dfsCluster.getNameNode(1).getFSImage().hasRollbackFSImage());
dfsCluster.restartNameNode(0,true,"-rollingUpgrade","rollback");
dfsCluster.shutdownNameNode(1);
dfsCluster.transitionToActive(0);
dfs=dfsCluster.getFileSystem(0);
Assert.assertTrue(dfs.exists(foo));
Assert.assertFalse(dfs.exists(bar));
NNStorage storage=dfsCluster.getNamesystem(0).getFSImage().getStorage();
checkNNStorage(storage,4,7);
for (int i=0; i < NUM_JOURNAL_NODES; i++) {
File dir=cluster.getJournalCluster().getCurrentDir(0,MiniQJMHACluster.NAMESERVICE);
checkJNStorage(dir,5,7);
}
dfsCluster.restartNameNode(0);
dfsCluster.transitionToActive(0);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
* Test that, if there are no blocks in the filesystem,
* the NameNode doesn't enter the "safemode extension" period.
*/
@Test(timeout=45000) public void testNoExtensionIfNoBlocks() throws IOException {
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,60000);
cluster.restartNameNode();
String status=cluster.getNameNode().getNamesystem().getSafemode();
assertEquals("",status);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the NN initializes its under-replicated blocks queue
 * before it is ready to exit safemode (HDFS-1476)
 */
@Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception {
  LOG.info("Starting testInitializeReplQueuesEarly");
  BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(), false);
  cluster.startDataNodes(conf, 2, true, StartupOption.REGULAR, null);
  cluster.waitActive();
  LOG.info("Creating files");
  DFSTestUtil.createFile(fs, TEST_PATH, 15 * BLOCK_SIZE, (short) 1, 1L);
  LOG.info("Stopping all DataNodes");
  // Fix: use a typed list instead of a raw List, which required an
  // unchecked conversion at the restartDataNode call below.
  List<MiniDFSCluster.DataNodeProperties> dnprops = Lists.newLinkedList();
  dnprops.add(cluster.stopDataNode(0));
  dnprops.add(cluster.stopDataNode(0));
  dnprops.add(cluster.stopDataNode(0));
  // Allow replication queues to initialize once 1/15 of blocks report in.
  cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, 1f / 15f);
  LOG.info("Restarting NameNode");
  cluster.restartNameNode();
  final NameNode nn = cluster.getNameNode();
  // With no datanodes up, all 15 blocks are unreported and safe mode is on.
  String status = nn.getNamesystem().getSafemode();
  assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. " + "Safe mode will be turned off automatically once the thresholds " + "have been reached.", status);
  assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed", NameNodeAdapter.safeModeInitializedReplQueues(nn));
  LOG.info("Restarting one DataNode");
  cluster.restartDataNode(dnprops.remove(0));
  // Wait until the restarted DN's storage block reports are processed.
  GenericTestUtils.waitFor(new Supplier<Boolean>() { // fix: was a raw Supplier
    @Override public Boolean get() {
      return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode();
    }
  }, 10, 10000);
  // The single DN's report makes some—but not all—blocks safe, which must
  // be enough to have initialized the replication queues.
  final int safe = NameNodeAdapter.getSafeModeSafeBlocks(nn);
  assertTrue("Expected first block report to make some blocks safe.", safe > 0);
  assertTrue("Did not expect first block report to make all blocks safe.", safe < 15);
  assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn));
  BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
  long underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
  // Poll until the under-replicated count matches the unsafe block count.
  while (underReplicatedBlocks != (15 - safe)) {
    LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual=" + underReplicatedBlocks);
    Thread.sleep(100);
    BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
    underReplicatedBlocks = nn.getNamesystem().getUnderReplicatedBlocks();
  }
  cluster.restartDataNodes();
}
InternalCallVerifier BooleanVerifier
/**
* This test verifies that if SafeMode is manually entered, name-node does not
* come out of safe mode even after the startup safe mode conditions are met.
*
* - Start cluster with 1 data-node.
* - Create 2 files with replication 1.
* - Re-start cluster with 0 data-nodes.
* Name-node should stay in automatic safe-mode.
* - Enter safe mode manually.
* - Start the data-node.
* - Wait longer than dfs.namenode.safemode.extension and
* verify that the name-node is still in safe mode.
*
* @throws IOException
*/
@Test public void testManualSafeMode() throws IOException {
  fs=cluster.getFileSystem();
  // Two replication-1 files so the restarted NN has blocks to account for.
  final Path fileA=new Path("/tmp/testManualSafeMode/file1");
  final Path fileB=new Path("/tmp/testManualSafeMode/file2");
  DFSTestUtil.createFile(fs,fileA,1000,(short)1,0);
  DFSTestUtil.createFile(fs,fileB,1000,(short)1,0);
  fs.close();
  cluster.shutdown();
  // Bring the cluster back with zero DataNodes; the NN must sit in
  // automatic (startup) safemode since no blocks can be reported.
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
  cluster.waitActive();
  dfs=cluster.getFileSystem();
  assertTrue("No datanode is started. Should be in SafeMode",dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
  // Switch to MANUAL safemode, then satisfy the automatic-exit conditions
  // by starting a DataNode and waiting past the extension window.
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  cluster.startDataNodes(conf,1,true,null,null);
  cluster.waitActive();
  try {
    Thread.sleep(2000);
  }
  catch ( InterruptedException ignored) {
    // best-effort wait; proceed to the assertions regardless
  }
  // Manual safemode must survive even though all blocks are now reported.
  assertTrue("should still be in SafeMode",dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
  // SAFEMODE_LEAVE returns the new state: false == safemode is off.
  assertFalse("should not be in SafeMode",dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Run various fs operations while the NN is in safe mode,
* assert that they are either allowed or fail as expected.
*/
@Test public void testOperationsWhileInSafeMode() throws IOException, InterruptedException {
final Path file1=new Path("/file1");
// Precondition: not in safemode when the test starts.
assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
DFSTestUtil.createFile(fs,file1,1024,(short)1,0);
assertTrue("Could not enter SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
// Every mutating namespace operation below is expected to FAIL while in
// safemode; runFsFun asserts the failure for each one.
runFsFun("Set quota while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
((DistributedFileSystem)fs).setQuota(file1,1,1);
}
}
);
runFsFun("Set perm while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setPermission(file1,FsPermission.getDefault());
}
}
);
runFsFun("Set owner while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setOwner(file1,"user","group");
}
}
);
runFsFun("Set repl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setReplication(file1,(short)1);
}
}
);
runFsFun("Append file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
DFSTestUtil.appendFile(fs,file1,"new bytes");
}
}
);
runFsFun("Delete file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.delete(file1,false);
}
}
);
runFsFun("Rename file while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.rename(file1,new Path("file2"));
}
}
);
runFsFun("Set time while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setTimes(file1,0,0);
}
}
);
// ACL and XAttr mutations are namespace writes too, so they must also fail.
runFsFun("modifyAclEntries while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.modifyAclEntries(file1,Lists.newArrayList());
}
}
);
runFsFun("removeAclEntries while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeAclEntries(file1,Lists.newArrayList());
}
}
);
runFsFun("removeDefaultAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeDefaultAcl(file1);
}
}
);
runFsFun("removeAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeAcl(file1);
}
}
);
runFsFun("setAcl while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setAcl(file1,Lists.newArrayList());
}
}
);
runFsFun("setXAttr while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.setXAttr(file1,"user.a1",null);
}
}
);
runFsFun("removeXAttr while in SM",new FSRun(){
@Override public void run( FileSystem fs) throws IOException {
fs.removeXAttr(file1,"user.a1");
}
}
);
// Read-only operations must continue to work while in safemode.
try {
DFSTestUtil.readFile(fs,file1);
}
catch ( IOException ioe) {
fail("Set times failed while in SM");
}
try {
fs.getAclStatus(file1);
}
catch ( IOException ioe) {
fail("getAclStatus failed while in SM");
}
// access() permission checks are also served in safemode: a non-owner user
// can READ file1 but a WRITE probe must be denied (presumably by the
// file's default permissions — the point is the check still runs in SM).
UserGroupInformation ugiX=UserGroupInformation.createRemoteUser("userX");
FileSystem myfs=ugiX.doAs(new PrivilegedExceptionAction(){
@Override public FileSystem run() throws IOException {
return FileSystem.get(conf);
}
}
);
myfs.access(file1,FsAction.READ);
try {
myfs.access(file1,FsAction.WRITE);
fail("The access call should have failed.");
}
catch ( AccessControlException e) {
// expected: WRITE access denied for userX
}
// SAFEMODE_LEAVE returns the new state (false == safemode off).
assertFalse("Could not leave SM",dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
}
InternalCallVerifier EqualityVerifier
/**
* Test that, when under-replicated blocks are processed at the end of
* safe-mode, blocks currently under construction are not considered
* under-construction or missing. Regression test for HDFS-2822.
*/
@Test public void testRbwBlocksNotConsideredUnderReplicated() throws IOException {
  // Keep every stream open so its last block is still RBW (replica being
  // written) when the NameNode restarts.
  final List openStreams=Lists.newArrayList();
  try {
    // A few fully-finalized blocks so safemode has a threshold to satisfy.
    DFSTestUtil.createFile(fs,new Path("/junk-blocks"),BLOCK_SIZE * 4,(short)1,1L);
    for (int idx=0; idx < 10; idx++) {
      final FSDataOutputStream out=fs.create(new Path("/append-" + idx),true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
      openStreams.add(out);
      out.write(1);
      out.hflush();
    }
    cluster.restartNameNode();
    final FSNamesystem ns=cluster.getNameNode(0).getNamesystem();
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // HDFS-2822 regression check: RBW blocks must not be counted as
    // pending-replication, corrupt, or missing once safemode ends.
    assertEquals(0,ns.getPendingReplicationBlocks());
    assertEquals(0,ns.getCorruptReplicaBlocks());
    assertEquals(0,ns.getMissingBlocksCount());
  }
  finally {
    for ( FSDataOutputStream out : openStreams) {
      IOUtils.closeStream(out);
    }
    cluster.shutdown();
  }
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * getBlockLocations must work before and during manual safemode, but after
 * a restart with no DataNodes (zero reported block locations) it must throw
 * SafeModeException — possibly wrapped in a RemoteException — until
 * safemode is explicitly left.
 */
@Test public void testSafeModeWhenZeroBlockLocations() throws IOException {
  try {
    Path file1=new Path("/tmp/testManualSafeMode/file1");
    Path file2=new Path("/tmp/testManualSafeMode/file2");
    System.out.println("Created file1 and file2.");
    DFSTestUtil.createFile(fs,file1,1000,(short)1,0);
    DFSTestUtil.createFile(fs,file2,2000,(short)1,0);
    checkGetBlockLocationsWorks(fs,file1);
    NameNode namenode=cluster.getNameNode();
    // Manual safemode with live DNs: block-location reads still succeed.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    assertTrue("should still be in SafeMode",namenode.isInSafeMode());
    checkGetBlockLocationsWorks(fs,file1);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    assertFalse("should not be in SafeMode",namenode.isInSafeMode());
    // Restart with only the NameNode: it starts in safemode and has zero
    // block locations, so location lookups must be rejected.
    cluster.shutdownDataNodes();
    cluster.shutdownNameNode(0);
    cluster.restartNameNode();
    cluster.waitActive();
    System.out.println("Restarted cluster with just the NameNode");
    namenode=cluster.getNameNode();
    assertTrue("No datanode is started. Should be in SafeMode",namenode.isInSafeMode());
    FileStatus stat=fs.getFileStatus(file1);
    try {
      fs.getFileBlockLocations(stat,0,1000);
      // fail() replaces the old assertTrue(msg, false) anti-pattern.
      fail("Should have got safemode exception");
    }
    catch ( SafeModeException e) {
      // expected: SafeModeException thrown directly
    }
    catch ( RemoteException re) {
      // expected when the exception arrives wrapped over RPC
      if (!re.getClassName().equals(SafeModeException.class.getName())) fail("Should have got safemode exception");
    }
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    assertFalse("Should not be in safemode",namenode.isInSafeMode());
    checkGetBlockLocationsWorks(fs,file1);
  }
  finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
* is set to a number greater than the number of live datanodes.
*/
@Test public void testDatanodeThreshold() throws IOException {
  cluster.shutdown();
  // Require at least one live DataNode before safemode may be left, and
  // remove the extension period so exit happens immediately afterwards.
  final Configuration nnConf=cluster.getConfiguration(0);
  nnConf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0);
  nnConf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,1);
  cluster.restartNameNode();
  fs=cluster.getFileSystem();
  // With zero DataNodes, the safemode tip must describe the DN shortfall.
  final String tipMsg=cluster.getNamesystem().getSafemode();
  assertTrue("Safemode tip message doesn't look right: " + tipMsg,tipMsg.contains("The number of live datanodes 0 needs an additional " + "1 live datanodes to reach the minimum number 1.\n" + "Safe mode will be turned off automatically"));
  // Starting one DataNode satisfies the threshold; safemode should clear.
  cluster.startDataNodes(nnConf,1,true,null,null);
  try {
    Thread.sleep(1000);
  }
  catch ( InterruptedException ignored) {
    // best-effort settle time before the final check
  }
  assertEquals("",cluster.getNamesystem().getSafemode());
}
InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier
/**
* Test (expected to throw IOE) for negative
* FSDataInpuStream#seek argument
*/
@Test(expected=IOException.class) public void testNegativeSeek() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs=cluster.getFileSystem();
  try {
    final Path seekFile=new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs,seekFile,ONEMB,ONEMB,fs.getDefaultBlockSize(seekFile),fs.getDefaultReplication(seekFile),seed);
    final FSDataInputStream in=fs.open(seekFile);
    // A legal forward seek works and updates the reported position...
    in.seek(65536);
    assertEquals(65536,in.getPos());
    // ...but a negative offset must raise the IOException this test expects.
    in.seek(-73);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier
/**
* Test (expected to throw IOE) for FSDataInpuStream#seek
* when the position argument is larger than the file size.
*/
@Test(expected=IOException.class) public void testSeekPastFileSize() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs=cluster.getFileSystem();
  try {
    final Path seekFile=new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs,seekFile,ONEMB,ONEMB,fs.getDefaultBlockSize(seekFile),fs.getDefaultReplication(seekFile),seed);
    final FSDataInputStream in=fs.open(seekFile);
    // An in-bounds seek works normally...
    in.seek(65536);
    assertEquals(65536,in.getPos());
    // ...but seeking to 3x the 1MB file length must raise the expected
    // IOException.
    in.seek(ONEMB + ONEMB + ONEMB);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Tests mod time change at close in DFS.
*/
@Test public void testTimesAtClose() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  final int MAX_IDLE_TIME=2000;
  final int replicas=1;
  // Tighten heartbeat/recheck intervals so the mini-cluster settles fast.
  conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50);
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  final InetSocketAddress nnAddr=new InetSocketAddress("localhost",cluster.getNameNodePort());
  final DFSClient client=new DFSClient(nnAddr,conf);
  DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ",numDatanodes,report.length);
  final FileSystem fileSys=cluster.getFileSystem();
  assertTrue(fileSys instanceof DistributedFileSystem);
  try {
    final Path file1=new Path("/simple.dat");
    final FSDataOutputStream out=writeFile(fileSys,file1,replicas);
    System.out.println("Created and wrote file simple.dat");
    // Capture the modification time while the file is still open.
    final FileStatus statBeforeClose=fileSys.getFileStatus(file1);
    final long mtimeBeforeClose=statBeforeClose.getModificationTime();
    final String mdateBeforeClose=dateForm.format(new Date(mtimeBeforeClose));
    System.out.println("mtime on " + file1 + " before close is "+ mdateBeforeClose+ " ("+ mtimeBeforeClose+ ")");
    assertTrue(mtimeBeforeClose != 0);
    out.close();
    System.out.println("Closed file.");
    // Closing the stream must bump the modification time.
    final FileStatus statAfterClose=fileSys.getFileStatus(file1);
    final long mtimeAfterClose=statAfterClose.getModificationTime();
    final String mdateAfterClose=dateForm.format(new Date(mtimeAfterClose));
    System.out.println("mtime on " + file1 + " after close is "+ mdateAfterClose+ " ("+ mtimeAfterClose+ ")");
    assertTrue(mtimeAfterClose != 0);
    assertTrue(mtimeBeforeClose != mtimeAfterClose);
    cleanupFile(fileSys,file1);
  }
  catch ( IOException e) {
    // Dump the full datanode report to aid debugging, then rethrow.
    report=client.datanodeReport(DatanodeReportType.ALL);
    printDatanodeReport(report);
    throw e;
  }
  finally {
    fileSys.close();
    cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Tests mod & access time in DFS.
*/
@Test public void testTimes() throws IOException {
Configuration conf=new HdfsConfiguration();
final int MAX_IDLE_TIME=2000;
// Fast heartbeat/recheck intervals so the mini-cluster reacts quickly.
conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Remember the NN port so the cluster can be restarted on the same one.
final int nnport=cluster.getNameNodePort();
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",numDatanodes,info.length);
FileSystem fileSys=cluster.getFileSystem();
int replicas=1;
assertTrue(fileSys instanceof DistributedFileSystem);
try {
System.out.println("Creating testdir1 and testdir1/test1.dat.");
Path dir1=new Path("testdir1");
Path file1=new Path(dir1,"test1.dat");
FSDataOutputStream stm=writeFile(fileSys,file1,replicas);
FileStatus stat=fileSys.getFileStatus(file1);
// atime is already set while the file is still open for write.
long atimeBeforeClose=stat.getAccessTime();
String adate=dateForm.format(new Date(atimeBeforeClose));
System.out.println("atime on " + file1 + " before close is "+ adate+ " ("+ atimeBeforeClose+ ")");
assertTrue(atimeBeforeClose != 0);
stm.close();
stat=fileSys.getFileStatus(file1);
long atime1=stat.getAccessTime();
long mtime1=stat.getModificationTime();
adate=dateForm.format(new Date(atime1));
String mdate=dateForm.format(new Date(mtime1));
System.out.println("atime on " + file1 + " is "+ adate+ " ("+ atime1+ ")");
System.out.println("mtime on " + file1 + " is "+ mdate+ " ("+ mtime1+ ")");
assertTrue(atime1 != 0);
// The directory's access time stays 0 (directories don't track atime).
stat=fileSys.getFileStatus(dir1);
long mdir1=stat.getAccessTime();
assertTrue(mdir1 == 0);
// setTimes with mtime=-1 updates ONLY the access time (back one day).
long atime2=atime1 - (24L * 3600L * 1000L);
fileSys.setTimes(file1,-1,atime2);
stat=fileSys.getFileStatus(file1);
long atime3=stat.getAccessTime();
String adate3=dateForm.format(new Date(atime3));
System.out.println("new atime on " + file1 + " is "+ adate3+ " ("+ atime3+ ")");
assertTrue(atime2 == atime3);
assertTrue(mtime1 == stat.getModificationTime());
// setTimes with atime=-1 updates ONLY the modification time (back 1h).
long mtime2=mtime1 - (3600L * 1000L);
fileSys.setTimes(file1,mtime2,-1);
stat=fileSys.getFileStatus(file1);
long mtime3=stat.getModificationTime();
String mdate3=dateForm.format(new Date(mtime3));
System.out.println("new mtime on " + file1 + " is "+ mdate3+ " ("+ mtime3+ ")");
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime2 == mtime3);
// Setting both times on a directory must take effect as well.
long mtime4=Time.now() - (3600L * 1000L);
long atime4=Time.now();
fileSys.setTimes(dir1,mtime4,atime4);
stat=fileSys.getFileStatus(dir1);
assertTrue("Not matching the modification times",mtime4 == stat.getModificationTime());
assertTrue("Not matching the access times",atime4 == stat.getAccessTime());
// setTimes on a non-existent path must fail with FileNotFoundException.
Path nonExistingDir=new Path(dir1,"/nonExistingDir/");
try {
fileSys.setTimes(nonExistingDir,mtime4,atime4);
fail("Expecting FileNotFoundException");
}
catch ( FileNotFoundException e) {
assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
}
cluster.shutdown();
try {
Thread.sleep(2 * MAX_IDLE_TIME);
}
catch ( InterruptedException e) {
}
// Restart on the same port without reformatting: the time updates must
// have been persisted and survive the restart.
cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
System.out.println("Verifying times after cluster restart");
stat=fileSys.getFileStatus(file1);
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime3 == stat.getModificationTime());
cleanupFile(fileSys,file1);
cleanupFile(fileSys,dir1);
}
catch ( IOException e) {
// Dump the datanode report for debugging, then rethrow.
info=client.datanodeReport(DatanodeReportType.ALL);
printDatanodeReport(info);
throw e;
}
finally {
fileSys.close();
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
@Test(timeout=60000) public void testClientAccessPrivilegeForRemove() throws Exception {
  // Export the filesystem read-only so any REMOVE must be rejected.
  config.set("dfs.nfs.exports.allowed.hosts","* ro");
  final Nfs3 nfs=new Nfs3(config);
  nfs.startServiceInternal(false);
  final RpcProgramNfs3 nfsd=(RpcProgramNfs3)nfs.getRpcProgram();
  // Build a REMOVE request: the test directory's handle plus the name "f1".
  final HdfsFileStatus dirStatus=nn.getRpcServer().getFileInfo(testdir);
  final XDR removeReq=new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(removeReq);
  removeReq.writeString("f1");
  final REMOVE3Response response=nfsd.remove(removeReq.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  // A read-only export must answer NFS3ERR_ACCES.
  assertEquals("Incorrect return code",Nfs3Status.NFS3ERR_ACCES,response.getStatus());
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
@Test public void testGetUserGroupInformationSecure() throws IOException {
  final String userName="user1";
  final String currentUser="test-user";
  // Simulate a kerberized login user that will act on behalf of userName.
  final UserGroupInformation currentUserUgi=UserGroupInformation.createRemoteUser(currentUser);
  currentUserUgi.setAuthenticationMethod(KERBEROS);
  UserGroupInformation.setLoginUser(currentUserUgi);
  final NfsConfiguration conf=new NfsConfiguration();
  final DFSClientCache cache=new DFSClientCache(conf);
  final UserGroupInformation ugiResult=cache.getUserGroupInformation(userName,currentUserUgi);
  // Result must be a PROXY UGI for userName backed by the real (login) user.
  assertThat(ugiResult.getUserName(),is(userName));
  assertThat(ugiResult.getRealUser(),is(currentUserUgi));
  assertThat(ugiResult.getAuthenticationMethod(),is(UserGroupInformation.AuthenticationMethod.PROXY));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testEviction() throws IOException {
  final NfsConfiguration conf=new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY,"hdfs://localhost");
  // Cap the cache at two clients so a second distinct user triggers
  // eviction of the first.
  final int MAX_CACHE_SIZE=2;
  final DFSClientCache cache=new DFSClientCache(conf,MAX_CACHE_SIZE);
  final DFSClient c1=cache.getDfsClient("test1");
  // Repeated lookups for the same user return the same, still-open client.
  assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1"));
  assertEquals(c1,cache.getDfsClient("test1"));
  assertFalse(isDfsClientClose(c1));
  // Inserting a client for a different user evicts and closes c1.
  cache.getDfsClient("test2");
  assertTrue(isDfsClientClose(c1));
  assertEquals(MAX_CACHE_SIZE - 1,cache.clientCache.size());
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
@Test public void testGetUserGroupInformation() throws IOException {
  final String userName="user1";
  final String currentUser="currentUser";
  // A plain (non-kerberos) test user acting as the effective caller.
  final UserGroupInformation currentUserUgi=UserGroupInformation.createUserForTesting(currentUser,new String[0]);
  final NfsConfiguration conf=new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY,"hdfs://localhost");
  final DFSClientCache cache=new DFSClientCache(conf);
  final UserGroupInformation ugiResult=cache.getUserGroupInformation(userName,currentUserUgi);
  // Result must be a PROXY UGI for userName backed by the real user.
  assertThat(ugiResult.getUserName(),is(userName));
  assertThat(ugiResult.getRealUser(),is(currentUserUgi));
  assertThat(ugiResult.getAuthenticationMethod(),is(UserGroupInformation.AuthenticationMethod.PROXY));
}
InternalCallVerifier BooleanVerifier
/**
 * A single configured NFS export point must be exactly what the mountd RPC
 * program exposes. Uses assertEquals so mismatches report expected vs.
 * actual values instead of a bare boolean failure.
 */
@Test public void testExportPoint() throws IOException {
  NfsConfiguration config=new NfsConfiguration();
  MiniDFSCluster cluster=null;
  String exportPoint="/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,exportPoint);
  // Let the mountd and NFS servers bind ephemeral ports.
  config.setInt("nfs3.mountd.port",0);
  config.setInt("nfs3.server.port",0);
  try {
    cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    final Nfs3 nfsServer=new Nfs3(config);
    nfsServer.startServiceInternal(false);
    Mountd mountd=nfsServer.getMountd();
    RpcProgramMountd rpcMount=(RpcProgramMountd)mountd.getRpcProgram();
    // Exactly one export, and it is the configured path.
    assertEquals(1,rpcMount.getExports().size());
    String exportInMountd=rpcMount.getExports().get(0);
    assertEquals(exportPoint,exportInMountd);
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier EqualityVerifier
/** Stub the mocked file attributes' uid/gid/mode/type in a single call. */
private static void stubAttrs(Nfs3FileAttributes attr,int uid,int gid,int mode,int type){
  Mockito.when(attr.getUid()).thenReturn(uid);
  Mockito.when(attr.getGid()).thenReturn(gid);
  Mockito.when(attr.getMode()).thenReturn(mode);
  Mockito.when(attr.getType()).thenReturn(type);
}
@Test public void testGetAccessRightsForUserGroup() throws IOException {
  Nfs3FileAttributes attr=Mockito.mock(Nfs3FileAttributes.class);
  // Regular file, mode 700 (octal) = 448: only the owning UID gets access.
  stubAttrs(attr,2,3,448,NfsFileType.NFSREG.toValue());
  assertEquals("No access should be allowed as UID does not match attribute over mode 700",0,Nfs3Utils.getAccessRightsForUserGroup(3,3,null,attr));
  // Mode 070 = 56: only the owning GID gets access.
  stubAttrs(attr,2,3,56,NfsFileType.NFSREG.toValue());
  assertEquals("No access should be allowed as GID does not match attribute over mode 070",0,Nfs3Utils.getAccessRightsForUserGroup(2,4,null,attr));
  // Mode 007 = 7: "other" bits apply when neither UID nor any GID matches.
  stubAttrs(attr,2,3,7,NfsFileType.NFSREG.toValue());
  assertEquals("Access should be allowed as mode is 007 and UID/GID do not match",61,Nfs3Utils.getAccessRightsForUserGroup(1,4,new int[]{5,6},attr));
  // Mode 440 = 288: an auxiliary GID match grants the group bits.
  stubAttrs(attr,2,10,288,NfsFileType.NFSREG.toValue());
  assertEquals("Access should be allowed as mode is 440 and Aux GID does match",1,Nfs3Utils.getAccessRightsForUserGroup(3,4,new int[]{5,16,10},attr));
  // Directory, mode 700: owner UID gets access, group/aux matches do not.
  stubAttrs(attr,2,10,448,NfsFileType.NFSDIR.toValue());
  assertEquals("Access should be allowed for dir as mode is 700 and UID does match",31,Nfs3Utils.getAccessRightsForUserGroup(2,4,new int[]{5,16,10},attr));
  assertEquals("No access should be allowed for dir as mode is 700 even though GID does match",0,Nfs3Utils.getAccessRightsForUserGroup(3,10,new int[]{5,16,4},attr));
  assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",0,Nfs3Utils.getAccessRightsForUserGroup(3,20,new int[]{5,10},attr));
  // Directory, mode 711 = 457: group execute bit grants lookup access.
  stubAttrs(attr,2,10,457,NfsFileType.NFSDIR.toValue());
  assertEquals("Access should be allowed for dir as mode is 711 and GID matches",2,Nfs3Utils.getAccessRightsForUserGroup(3,10,new int[]{5,16,11},attr));
}
InternalCallVerifier BooleanVerifier
/**
 * OpenFileCtxCache.scan() must evict entries that exceed the given stream
 * timeout, and with a longer timeout must evict only inactive entries.
 * Size checks use assertEquals for informative failure messages.
 */
@Test public void testScan() throws IOException, InterruptedException {
  NfsConfiguration conf=new NfsConfiguration();
  conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,2);
  DFSClient dfsClient=Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr=new Nfs3FileAttributes();
  HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long)0);
  OpenFileCtx context1=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context2=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context3=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context4=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtxCache cache=new OpenFileCtxCache(conf,10 * 60 * 100);
  boolean ret=cache.put(new FileHandle(1),context1);
  assertTrue(ret);
  ret=cache.put(new FileHandle(2),context2);
  assertTrue(ret);
  // Let both entries age past the minimum stream timeout, then scan with
  // that timeout: both must be evicted.
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
  cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  assertEquals(0,cache.size());
  ret=cache.put(new FileHandle(3),context3);
  assertTrue(ret);
  ret=cache.put(new FileHandle(4),context4);
  assertTrue(ret);
  // With the (longer) default timeout, a scan removes only entries that
  // have been marked inactive — here, context3.
  context3.setActiveStatusForTest(false);
  cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
  assertEquals(1,cache.size());
  assertTrue(cache.get(new FileHandle(3)) == null);
  assertTrue(cache.get(new FileHandle(4)) != null);
}
InternalCallVerifier BooleanVerifier
/**
 * OpenFileCtxCache eviction: a full cache admits a new entry only by
 * evicting one that is old enough or inactive, and never evicts entries
 * with pending writes or commits. Replaces the deprecated
 * {@code new Long(100)} with {@code Long.valueOf} and boolean-wrapped size
 * checks with assertEquals.
 */
@Test public void testEviction() throws IOException, InterruptedException {
  NfsConfiguration conf=new NfsConfiguration();
  conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,2);
  DFSClient dfsClient=Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr=new Nfs3FileAttributes();
  HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
  Mockito.when(fos.getPos()).thenReturn((long)0);
  OpenFileCtx context1=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context2=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context3=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context4=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtx context5=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
  OpenFileCtxCache cache=new OpenFileCtxCache(conf,10 * 60 * 100);
  boolean ret=cache.put(new FileHandle(1),context1);
  assertTrue(ret);
  Thread.sleep(1000);
  ret=cache.put(new FileHandle(2),context2);
  assertTrue(ret);
  // Cache is full and both entries are active and young: insertion fails.
  ret=cache.put(new FileHandle(3),context3);
  assertFalse(ret);
  assertEquals(2,cache.size());
  // After the oldest entry passes the minimum stream timeout it becomes
  // evictable, so the same insertion now succeeds and displaces handle 1.
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  assertEquals(2,cache.size());
  ret=cache.put(new FileHandle(3),context3);
  assertTrue(ret);
  assertEquals(2,cache.size());
  assertTrue(cache.get(new FileHandle(1)) == null);
  // An inactive entry can be evicted immediately.
  context3.setActiveStatusForTest(false);
  ret=cache.put(new FileHandle(4),context4);
  assertTrue(ret);
  // Entries with pending writes (context2) or pending commits (context4)
  // must never be evicted, so a further insertion fails even after the
  // timeout elapses.
  context2.getPendingWritesForTest().put(new OffsetRange(0,100),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
  context4.getPendingCommitsForTest().put(Long.valueOf(100),new CommitCtx(0,null,0,attr));
  Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
  ret=cache.put(new FileHandle(5),context5);
  assertFalse(ret);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * READDIRPLUS3: a listing from the start returns all entries; resuming from
 * an entry's cookie returns the remainder; and resuming from a deleted
 * entry's cookie still succeeds with the entries that follow. Size and
 * name checks use assertEquals for informative failures.
 */
@Test public void testReaddirPlus() throws IOException {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  // Request: dir handle, cookie=0 (start), cookieverf=0, dirCount, maxCount.
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  xdr_req.writeInt(1000);
  READDIRPLUS3Response responsePlus=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  List direntPlus=responsePlus.getDirListPlus().getEntries();
  // Full listing of the test directory has 5 entries.
  assertEquals(5,direntPlus.size());
  // Resume the listing from f2's file id used as the cookie.
  status=nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id=status.getFileId();
  xdr_req=new XDR();
  handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(f2Id);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  xdr_req.writeInt(1000);
  responsePlus=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  direntPlus=responsePlus.getDirListPlus().getEntries();
  assertEquals(1,direntPlus.size());
  EntryPlus3 entryPlus=direntPlus.get(0);
  assertEquals("f3",entryPlus.getName());
  // Deleting the cookie's entry must not break a subsequent resume.
  hdfs.delete(new Path(testdir + "/f2"),false);
  responsePlus=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  direntPlus=responsePlus.getDirListPlus().getEntries();
  assertEquals(2,direntPlus.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * READDIR3 (without attributes): same cookie semantics as testReaddirPlus —
 * full listing, resume from a cookie, and resume from a deleted entry's
 * cookie. Size and name checks use assertEquals for informative failures.
 */
@Test public void testReaddirBasic() throws IOException {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  // Request: dir handle, cookie=0 (start), cookieverf=0, count.
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  READDIR3Response response=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  List dirents=response.getDirList().getEntries();
  // Full listing of the test directory has 5 entries.
  assertEquals(5,dirents.size());
  // Resume the listing from f2's file id used as the cookie.
  status=nn.getRpcServer().getFileInfo(testdir + "/f2");
  long f2Id=status.getFileId();
  xdr_req=new XDR();
  handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(f2Id);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  response=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  dirents=response.getDirList().getEntries();
  assertEquals(1,dirents.size());
  Entry3 entry=dirents.get(0);
  assertEquals("f3",entry.getName());
  // Deleting the cookie's entry must not break a subsequent resume.
  hdfs.delete(new Path(testdir + "/f2"),false);
  response=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  dirents=response.getDirList().getEntries();
  assertEquals(2,dirents.size());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=60000) public void testRmdir() throws Exception {
  final HdfsFileStatus dirStatus=nn.getRpcServer().getFileInfo(testdir);
  // RMDIR request: parent directory handle followed by the child name "foo".
  final XDR rmdirReq=new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(rmdirReq);
  rmdirReq.writeString("foo");
  // An unprivileged caller is rejected; a privileged one succeeds.
  final RMDIR3Response response1=nfsd.rmdir(rmdirReq.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  final RMDIR3Response response2=nfsd.rmdir(rmdirReq.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=60000) public void testAccess() throws Exception {
  final HdfsFileStatus barStatus=nn.getRpcServer().getFileInfo("/tmp/bar");
  // ACCESS request consists solely of the target's file handle.
  final XDR accessReq=new XDR();
  new FileHandle(barStatus.getFileId()).serialize(accessReq);
  // An unprivileged caller is rejected; a privileged one succeeds.
  final ACCESS3Response response1=nfsd.access(accessReq.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  final ACCESS3Response response2=nfsd.access(accessReq.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code",Nfs3Status.NFS3_OK,response2.getStatus());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=60000) public void testCreate() throws Exception {
  final HdfsFileStatus dirStatus=nn.getRpcServer().getFileInfo(testdir);
  // CREATE request: dir handle, new file name, UNCHECKED create mode, and
  // default (empty) SetAttr3 attributes.
  final XDR createReq=new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(createReq);
  createReq.writeString("fubar");
  createReq.writeInt(Nfs3Constant.CREATE_UNCHECKED);
  new SetAttr3().serialize(createReq);
  // An unprivileged caller is rejected; a privileged one succeeds.
  final CREATE3Response response1=nfsd.create(createReq.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  final CREATE3Response response2=nfsd.create(createReq.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}
BranchVerifier InternalCallVerifier BooleanVerifier
/** Every NFSv3 procedure must report the expected idempotency (1 = idempotent, 0 = not). */
@Test(timeout=1000) public void testIdempotent(){
  // Expectation table: {procedure, flag}.
  Object[][] expectations={{Nfs3Constant.NFSPROC3.NULL,1},{Nfs3Constant.NFSPROC3.GETATTR,1},{Nfs3Constant.NFSPROC3.SETATTR,1},{Nfs3Constant.NFSPROC3.LOOKUP,1},{Nfs3Constant.NFSPROC3.ACCESS,1},{Nfs3Constant.NFSPROC3.READLINK,1},{Nfs3Constant.NFSPROC3.READ,1},{Nfs3Constant.NFSPROC3.WRITE,1},{Nfs3Constant.NFSPROC3.CREATE,0},{Nfs3Constant.NFSPROC3.MKDIR,0},{Nfs3Constant.NFSPROC3.SYMLINK,0},{Nfs3Constant.NFSPROC3.MKNOD,0},{Nfs3Constant.NFSPROC3.REMOVE,0},{Nfs3Constant.NFSPROC3.RMDIR,0},{Nfs3Constant.NFSPROC3.RENAME,0},{Nfs3Constant.NFSPROC3.LINK,0},{Nfs3Constant.NFSPROC3.READDIR,1},{Nfs3Constant.NFSPROC3.READDIRPLUS,1},{Nfs3Constant.NFSPROC3.FSSTAT,1},{Nfs3Constant.NFSPROC3.FSINFO,1},{Nfs3Constant.NFSPROC3.PATHCONF,1},{Nfs3Constant.NFSPROC3.COMMIT,1}};
  for (Object[] row : expectations) {
    Nfs3Constant.NFSPROC3 op = (Nfs3Constant.NFSPROC3) row[0];
    if (Integer.valueOf(1).equals(row[1])) {
      Assert.assertTrue(("Procedure " + op + " should be idempotent"), op.isIdempotent());
    } else {
      Assert.assertFalse(("Procedure " + op + " should be non-idempotent"), op.isIdempotent());
    }
  }
}
InternalCallVerifier EqualityVerifier
/** LOOKUP of "bar" under testdir: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testLookup() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  LOOKUP3Request lookup = new LOOKUP3Request(new FileHandle(dirStatus.getFileId()), "bar");
  XDR request = new XDR();
  lookup.serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  LOOKUP3Response denied = nfsd.lookup(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  LOOKUP3Response found = nfsd.lookup(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, found.getStatus());
}
InternalCallVerifier EqualityVerifier
// Creates a symlink "fubar" -> "bar" under testdir as the privileged user,
// then exercises READLINK on the new link: the unprivileged caller must get
// NFS3ERR_ACCES while the privileged caller reads the link successfully.
@Test(timeout=60000) public void testReadlink() throws Exception {
HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
long dirId=status.getFileId();
// SYMLINK request layout: parent handle, link name, attributes, target path.
XDR xdr_req=new XDR();
FileHandle handle=new FileHandle(dirId);
handle.serialize(xdr_req);
xdr_req.writeString("fubar");
SetAttr3 symAttr=new SetAttr3();
symAttr.serialize(xdr_req);
xdr_req.writeString("bar");
// The link must be created successfully before READLINK can be tested.
SYMLINK3Response response=nfsd.symlink(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response.getStatus());
// READLINK request carries only the handle of the symlink just created.
FileHandle handle2=response.getObjFileHandle();
XDR xdr_req2=new XDR();
handle2.serialize(xdr_req2);
READLINK3Response response1=nfsd.readlink(xdr_req2.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
READLINK3Response response2=nfsd.readlink(xdr_req2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}
InternalCallVerifier EqualityVerifier
/** PATHCONF on /tmp/bar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testPathconf() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  XDR request = new XDR();
  new FileHandle(fileStatus.getFileId()).serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  PATHCONF3Response denied = nfsd.pathconf(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  PATHCONF3Response info = nfsd.pathconf(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, info.getStatus());
}
InternalCallVerifier EqualityVerifier
/**
 * MKDIR of "fubar" in testdir: unprivileged caller is denied, privileged
 * caller succeeds.
 *
 * Fixed: this test previously serialized a SYMLINK request and invoked
 * nfsd.symlink() (duplicating testSymlink), so the MKDIR handler was never
 * exercised. It now serializes a proper MKDIR3 request (directory handle,
 * new directory name, attributes) and calls nfsd.mkdir().
 */
@Test(timeout=60000) public void testMkdir() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("fubar");
  SetAttr3 attr = new SetAttr3();
  attr.serialize(xdr_req);
  // Attempt by an unprivileged user must fail with NFS3ERR_ACCES.
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      nfsd.mkdir(xdr_req.asReadOnlyWrap(), securityHandlerUnpriviledged,
          new InetSocketAddress("localhost", 1234)).getStatus());
  // Attempt by a privileged user must succeed.
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      nfsd.mkdir(xdr_req.asReadOnlyWrap(), securityHandler,
          new InetSocketAddress("localhost", 1234)).getStatus());
}
InternalCallVerifier EqualityVerifier
/** SYMLINK "fubar" -> "bar" in testdir: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testSymlink() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  // SYMLINK request layout: parent handle, link name, attributes, target path.
  XDR request = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(request);
  request.writeString("fubar");
  new SetAttr3().serialize(request);
  request.writeString("bar");
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  SYMLINK3Response denied = nfsd.symlink(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  SYMLINK3Response created = nfsd.symlink(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, created.getStatus());
}
InternalCallVerifier EqualityVerifier
/** GETATTR on /tmp/bar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testGetattr() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  XDR request = new XDR();
  new FileHandle(fileStatus.getFileId()).serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  GETATTR3Response denied = nfsd.getattr(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  GETATTR3Response attrs = nfsd.getattr(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, attrs.getStatus());
}
InternalCallVerifier EqualityVerifier
/** READDIR on testdir: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testReaddir() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  XDR request = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(request);
  request.writeLongAsHyper(0); // cookie
  request.writeLongAsHyper(0); // cookie verifier
  request.writeInt(100);       // count
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  READDIR3Response denied = nfsd.readdir(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  READDIR3Response listing = nfsd.readdir(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, listing.getStatus());
}
InternalCallVerifier EqualityVerifier
/** FSINFO via the handle of /tmp/bar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testFsinfo() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  XDR request = new XDR();
  new FileHandle(fileStatus.getFileId()).serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  FSINFO3Response denied = nfsd.fsinfo(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  FSINFO3Response info = nfsd.fsinfo(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, info.getStatus());
}
InternalCallVerifier EqualityVerifier
/** FSSTAT via the handle of /tmp/bar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testFsstat() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  XDR request = new XDR();
  new FileHandle(fileStatus.getFileId()).serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  FSSTAT3Response denied = nfsd.fsstat(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  FSSTAT3Response stats = nfsd.fsstat(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, stats.getStatus());
}
InternalCallVerifier EqualityVerifier
// WRITE of 10 bytes at offset 0 to /tmp/bar. The unprivileged caller must be
// rejected with NFS3ERR_ACCES; for the privileged caller the call returns null
// here (the write is accepted and no immediate response object is produced --
// presumably the reply is sent asynchronously; see the testCommit null check).
@Test(timeout=60000) public void testWrite() throws Exception {
HdfsFileStatus status=nn.getRpcServer().getFileInfo("/tmp/bar");
long dirId=status.getFileId();
FileHandle handle=new FileHandle(dirId);
// Payload: bytes 0..9.
byte[] buffer=new byte[10];
for (int i=0; i < 10; i++) {
buffer[i]=(byte)i;
}
WRITE3Request writeReq=new WRITE3Request(handle,0,10,WriteStableHow.DATA_SYNC,ByteBuffer.wrap(buffer));
XDR xdr_req=new XDR();
writeReq.serialize(xdr_req);
WRITE3Response response1=nfsd.write(xdr_req.asReadOnlyWrap(),null,1,securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
WRITE3Response response2=nfsd.write(xdr_req.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect response:",null,response2);
}
InternalCallVerifier EqualityVerifier
/** READ of 5 bytes at offset 0 from /tmp/bar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testRead() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  READ3Request read = new READ3Request(new FileHandle(fileStatus.getFileId()), 0, 5);
  XDR request = new XDR();
  read.serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  READ3Response denied = nfsd.read(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  READ3Response data = nfsd.read(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, data.getStatus());
}
InternalCallVerifier EqualityVerifier
/** READDIRPLUS on testdir: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testReaddirplus() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  XDR request = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(request);
  request.writeLongAsHyper(0); // cookie
  request.writeLongAsHyper(0); // cookie verifier
  request.writeInt(3);         // dircount
  request.writeInt(2);         // maxcount
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  READDIRPLUS3Response denied = nfsd.readdirplus(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  READDIRPLUS3Response listing = nfsd.readdirplus(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, listing.getStatus());
}
InternalCallVerifier EqualityVerifier
// COMMIT on /tmp/bar over a mocked Netty channel. The unprivileged caller must
// be rejected with NFS3ERR_ACCES; the privileged call returns null here (no
// immediate response object -- presumably the reply is delivered via the
// channel asynchronously; same pattern as testWrite).
@Test(timeout=60000) public void testCommit() throws Exception {
HdfsFileStatus status=nn.getRpcServer().getFileInfo("/tmp/bar");
long dirId=status.getFileId();
FileHandle handle=new FileHandle(dirId);
// COMMIT request layout: handle, offset (0), count (5).
XDR xdr_req=new XDR();
handle.serialize(xdr_req);
xdr_req.writeLongAsHyper(0);
xdr_req.writeInt(5);
Channel ch=Mockito.mock(Channel.class);
COMMIT3Response response1=nfsd.commit(xdr_req.asReadOnlyWrap(),ch,1,securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
COMMIT3Response response2=nfsd.commit(xdr_req.asReadOnlyWrap(),ch,1,securityHandler,new InetSocketAddress("localhost",1234));
assertEquals("Incorrect COMMIT3Response:",null,response2);
}
InternalCallVerifier EqualityVerifier
/** RENAME testdir/bar -> testdir/fubar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testRename() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  FileHandle dirHandle = new FileHandle(dirStatus.getFileId());
  // RENAME request layout: source dir handle, source name, target dir handle, target name.
  XDR request = new XDR();
  dirHandle.serialize(request);
  request.writeString("bar");
  dirHandle.serialize(request);
  request.writeString("fubar");
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  RENAME3Response denied = nfsd.rename(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  RENAME3Response renamed = nfsd.rename(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, renamed.getStatus());
}
InternalCallVerifier EqualityVerifier
/** SETATTR under testdir: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testSetattr() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  XDR request = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(request);
  request.writeString("bar");
  new SetAttr3().serialize(request); // empty attribute set
  request.writeBoolean(false);       // no guard check
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  SETATTR3Response denied = nfsd.setattr(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  SETATTR3Response applied = nfsd.setattr(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, applied.getStatus());
}
InternalCallVerifier BooleanVerifier
/**
 * Setting each deprecated NFS configuration key must be readable through the
 * corresponding current key. Uses assertEquals instead of assertTrue(x == y)
 * so a failure reports the actual configured value.
 */
@Test public void testDeprecatedKeys(){
  NfsConfiguration conf = new NfsConfiguration();
  conf.setInt("nfs3.server.port", 998);
  assertEquals(998, conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0));
  conf.setInt("nfs3.mountd.port", 999);
  assertEquals(999, conf.getInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0));
  conf.set("dfs.nfs.exports.allowed.hosts", "host1");
  assertEquals("host1", conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY));
  conf.setInt("dfs.nfs.exports.cache.expirytime.millis", 1000);
  assertEquals(1000, conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 0));
  conf.setInt("hadoop.nfs.userupdate.milly", 10);
  assertEquals(10, conf.getInt(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0));
  conf.set("dfs.nfs3.dump.dir", "/nfs/tmp");
  assertEquals("/nfs/tmp", conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY));
  conf.setBoolean("dfs.nfs3.enableDump", false);
  // Default true is overridden through the deprecated key.
  assertTrue(conf.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, true) == false);
  conf.setInt("dfs.nfs3.max.open.files", 500);
  assertEquals(500, conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 0));
  conf.setInt("dfs.nfs3.stream.timeout", 6000);
  assertEquals(6000, conf.getInt(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 0));
  conf.set("dfs.nfs3.export.point", "/dir1");
  assertEquals("/dir1", conf.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY));
}
InternalCallVerifier EqualityVerifier
/** REMOVE of testdir/bar: unprivileged caller is denied, privileged caller succeeds. */
@Test(timeout=60000) public void testRemove() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  XDR request = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(request);
  request.writeString("bar");
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  REMOVE3Response denied = nfsd.remove(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  REMOVE3Response removed = nfsd.remove(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, removed.getStatus());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Walks OpenFileCtx.checkCommit/checkCommitInternal through its states using a
// mocked output stream whose flushed position (getPos) is controlled directly:
// inactive context, inactive with pending writes, sync/finished when the commit
// offset is covered by flushed data, and deferred (COMMIT_WAIT) otherwise.
// NOTE: the assertion order is load-bearing -- each checkCommit call mutates
// the pending-commit map inspected afterwards.
@Test public void testCheckCommit() throws IOException {
DFSClient dfsClient=Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr=new Nfs3FileAttributes();
HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long)0);
OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(new NfsConfiguration()));
COMMIT_STATUS ret;
// Inactive context with no pending writes.
ctx.setActiveStatusForTest(false);
Channel ch=Mockito.mock(Channel.class);
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
// Inactive context WITH a pending write (range [5,10)).
ctx.getPendingWritesForTest().put(new OffsetRange(5,10),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
// Active context; pretend 10 bytes have been flushed.
ctx.setActiveStatusForTest(true);
Mockito.when(fos.getPos()).thenReturn((long)10);
// Commit offset 5 <= flushed length: a sync is required, then finishes.
COMMIT_STATUS status=ctx.checkCommitInternal(5,null,1,attr,false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
ret=ctx.checkCommit(dfsClient,5,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
// Commit offset exactly at the flushed length behaves the same.
status=ctx.checkCommitInternal(10,ch,1,attr,false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
ret=ctx.checkCommit(dfsClient,10,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
ConcurrentNavigableMap commits=ctx.getPendingCommitsForTest();
Assert.assertTrue(commits.size() == 0);
// Commit offset 11 is beyond the flushed length: the commit is queued.
ret=ctx.checkCommit(dfsClient,11,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
long key=commits.firstKey();
Assert.assertTrue(key == 11);
commits.remove(new Long(11));
// Commit offset 0 with a pending write waits on the write's end offset (9).
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
key=commits.firstKey();
Assert.assertTrue(key == 9);
// With no pending writes left, commit offset 0 finishes immediately.
ctx.getPendingWritesForTest().remove(new OffsetRange(5,10));
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
// Verifies OpenFileCtx.alterWriteRequest: trimming a 20-byte write (values
// 0..19) at offsets 12, 1 and 19 must advance the data buffer's position to
// the trim offset, leaving only the tail bytes, with the byte values at each
// remaining position unchanged.
@Test public void testAlterWriteRequest() throws IOException {
int len=20;
byte[] data=new byte[len];
ByteBuffer buffer=ByteBuffer.wrap(data);
// Fill the buffer with bytes 0..19 and rewind it for reading.
for (int i=0; i < len; i++) {
buffer.put((byte)i);
}
buffer.flip();
int originalCount=buffer.array().length;
WRITE3Request request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
// Before altering: the WriteCtx sees the full backing array.
WriteCtx writeCtx1=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),WriteCtx.INVALID_ORIGINAL_COUNT,request.getStableHow(),request.getData(),null,1,false,WriteCtx.DataState.NO_DUMP);
Assert.assertTrue(writeCtx1.getData().array().length == originalCount);
// Trim at offset 12: 8 bytes remain, starting with value 12.
OpenFileCtx.alterWriteRequest(request,12);
WriteCtx writeCtx2=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
ByteBuffer appendedData=writeCtx2.getData();
int position=appendedData.position();
int limit=appendedData.limit();
Assert.assertTrue(position == 12);
Assert.assertTrue(limit - position == 8);
Assert.assertTrue(appendedData.get(position) == (byte)12);
Assert.assertTrue(appendedData.get(position + 1) == (byte)13);
Assert.assertTrue(appendedData.get(position + 2) == (byte)14);
Assert.assertTrue(appendedData.get(position + 7) == (byte)19);
// Trim at offset 1: 19 bytes remain, starting with value 1.
buffer.position(0);
request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
OpenFileCtx.alterWriteRequest(request,1);
WriteCtx writeCtx3=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
appendedData=writeCtx3.getData();
position=appendedData.position();
limit=appendedData.limit();
Assert.assertTrue(position == 1);
Assert.assertTrue(limit - position == 19);
Assert.assertTrue(appendedData.get(position) == (byte)1);
Assert.assertTrue(appendedData.get(position + 18) == (byte)19);
// Trim at offset 19 (last byte): exactly one byte remains, value 19.
buffer.position(0);
request=new WRITE3Request(new FileHandle(),0,data.length,WriteStableHow.UNSTABLE,buffer);
OpenFileCtx.alterWriteRequest(request,19);
WriteCtx writeCtx4=new WriteCtx(request.getHandle(),request.getOffset(),request.getCount(),originalCount,request.getStableHow(),request.getData(),null,2,false,WriteCtx.DataState.NO_DUMP);
appendedData=writeCtx4.getData();
position=appendedData.position();
limit=appendedData.limit();
Assert.assertTrue(position == 19);
Assert.assertTrue(limit - position == 1);
Assert.assertTrue(appendedData.get(position) == (byte)19);
}
InternalCallVerifier BooleanVerifier
// Sends three 32-byte WRITE requests in reverse offset order (out-of-order
// writes) against a MiniDFSCluster-backed NFS gateway, waits for the writes to
// be flushed, then reads back the middle chunk and checks its contents. This
// exercises the gateway's ability to reorder and commit OOO writes.
@Test public void testOOOWrites() throws IOException, InterruptedException {
NfsConfiguration config=new NfsConfiguration();
MiniDFSCluster cluster=null;
RpcProgramNfs3 nfsd;
final int bufSize=32;
final int numOOO=3;
// Mock a security handler that reports the current OS user.
SecurityHandler securityHandler=Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
// Allow the current user to proxy from any host/group.
String currentUser=System.getProperty("user.name");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser),"*");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser),"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
// Port 0: pick free ports so concurrent test runs do not collide.
config.setInt("nfs3.mountd.port",0);
config.setInt("nfs3.server.port",0);
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
Nfs3 nfs3=new Nfs3(config);
nfs3.startServiceInternal(false);
nfsd=(RpcProgramNfs3)nfs3.getRpcProgram();
DFSClient dfsClient=new DFSClient(NameNode.getAddress(config),config);
HdfsFileStatus status=dfsClient.getFileInfo("/");
FileHandle rootHandle=new FileHandle(status.getFileId());
// Create a fresh target file under the root.
CREATE3Request createReq=new CREATE3Request(rootHandle,"out-of-order-write" + System.currentTimeMillis(),Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
XDR createXdr=new XDR();
createReq.serialize(createXdr);
CREATE3Response createRsp=nfsd.create(createXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
FileHandle handle=createRsp.getObjHandle();
// Buffer i is filled with byte value i so chunks are distinguishable.
byte[][] oooBuf=new byte[numOOO][bufSize];
for (int i=0; i < numOOO; i++) {
Arrays.fill(oooBuf[i],(byte)i);
}
// Issue the writes at decreasing offsets: chunk 0 lands last in the file.
for (int i=0; i < numOOO; i++) {
final long offset=(numOOO - 1 - i) * bufSize;
WRITE3Request writeReq=new WRITE3Request(handle,offset,bufSize,WriteStableHow.UNSTABLE,ByteBuffer.wrap(oooBuf[i]));
XDR writeXdr=new XDR();
writeReq.serialize(writeXdr);
nfsd.write(writeXdr.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
}
// Block until all pending writes are flushed (writes complete asynchronously).
waitWrite(nfsd,handle,60000);
// Read back the middle chunk (offset bufSize) and verify it is buffer 1.
READ3Request readReq=new READ3Request(handle,bufSize,bufSize);
XDR readXdr=new XDR();
readReq.serialize(readXdr);
READ3Response readRsp=nfsd.read(readXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
assertTrue(Arrays.equals(oooBuf[1],readRsp.getData().array()));
}
 finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Writes and reads back two files through a MiniDFSCluster-backed NFS gateway
// using the two stable-write modes (DATA_SYNC and FILE_SYNC), verifying the
// data round-trips intact and the FILE_SYNC file reports the expected length.
@Test public void testWriteStableHow() throws IOException, InterruptedException {
NfsConfiguration config=new NfsConfiguration();
DFSClient client=null;
MiniDFSCluster cluster=null;
RpcProgramNfs3 nfsd;
// Mock a security handler that reports the current OS user.
SecurityHandler securityHandler=Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
// Allow the current user to proxy from any host/group.
String currentUser=System.getProperty("user.name");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser),"*");
config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser),"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
client=new DFSClient(NameNode.getAddress(config),config);
// Port 0: pick free ports so concurrent test runs do not collide.
config.setInt("nfs3.mountd.port",0);
config.setInt("nfs3.server.port",0);
Nfs3 nfs3=new Nfs3(config);
nfs3.startServiceInternal(false);
nfsd=(RpcProgramNfs3)nfs3.getRpcProgram();
HdfsFileStatus status=client.getFileInfo("/");
FileHandle rootHandle=new FileHandle(status.getFileId());
// File 1: write 10 bytes with DATA_SYNC and read them back.
CREATE3Request createReq=new CREATE3Request(rootHandle,"file1",Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
XDR createXdr=new XDR();
createReq.serialize(createXdr);
CREATE3Response createRsp=nfsd.create(createXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
FileHandle handle=createRsp.getObjHandle();
byte[] buffer=new byte[10];
for (int i=0; i < 10; i++) {
buffer[i]=(byte)i;
}
WRITE3Request writeReq=new WRITE3Request(handle,0,10,WriteStableHow.DATA_SYNC,ByteBuffer.wrap(buffer));
XDR writeXdr=new XDR();
writeReq.serialize(writeXdr);
nfsd.write(writeXdr.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
// Writes complete asynchronously; wait for the flush before reading.
waitWrite(nfsd,handle,60000);
READ3Request readReq=new READ3Request(handle,0,10);
XDR readXdr=new XDR();
readReq.serialize(readXdr);
READ3Response readRsp=nfsd.read(readXdr.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
assertTrue(Arrays.equals(buffer,readRsp.getData().array()));
// File 2: same payload with FILE_SYNC, then verify data and file length.
CREATE3Request createReq2=new CREATE3Request(rootHandle,"file2",Nfs3Constant.CREATE_UNCHECKED,new SetAttr3(),0);
XDR createXdr2=new XDR();
createReq2.serialize(createXdr2);
CREATE3Response createRsp2=nfsd.create(createXdr2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
FileHandle handle2=createRsp2.getObjHandle();
WRITE3Request writeReq2=new WRITE3Request(handle2,0,10,WriteStableHow.FILE_SYNC,ByteBuffer.wrap(buffer));
XDR writeXdr2=new XDR();
writeReq2.serialize(writeXdr2);
nfsd.write(writeXdr2.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
waitWrite(nfsd,handle2,60000);
READ3Request readReq2=new READ3Request(handle2,0,10);
XDR readXdr2=new XDR();
readReq2.serialize(readXdr2);
READ3Response readRsp2=nfsd.read(readXdr2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
assertTrue(Arrays.equals(buffer,readRsp2.getData().array()));
// FILE_SYNC implies the length is visible through a plain HDFS stat.
status=client.getFileInfo("/file2");
assertTrue(status.getLen() == 10);
}
 finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Same state walk as testCheckCommit, but with fromRead=true and verified
// through WriteManager.commitBeforeRead: commits triggered by reads must not
// queue pending commits (the map stays empty) and are surfaced to the reader
// as NFS3_OK / NFS3ERR_IO / NFS3ERR_JUKEBOX depending on the commit status.
// NOTE: the assertion order is load-bearing -- the context is mutated between
// assertions.
@Test public void testCheckCommitFromRead() throws IOException {
DFSClient dfsClient=Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr=new Nfs3FileAttributes();
HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long)0);
NfsConfiguration config=new NfsConfiguration();
OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(config));
FileHandle h=new FileHandle(1);
COMMIT_STATUS ret;
// Register the context so commitBeforeRead can find it by handle.
WriteManager wm=new WriteManager(new IdUserGroup(config),config,false);
assertTrue(wm.addOpenFileStream(h,ctx));
// Inactive context: a read-triggered commit succeeds trivially.
ctx.setActiveStatusForTest(false);
Channel ch=Mockito.mock(Channel.class);
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX,ret);
assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0));
// Inactive context with a pending write (range [5,10)): reader sees an IO error.
ctx.getPendingWritesForTest().put(new OffsetRange(5,10),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE,ret);
assertEquals(Nfs3Status.NFS3ERR_IO,wm.commitBeforeRead(dfsClient,h,0));
// Active context; pretend 10 bytes have been flushed.
ctx.setActiveStatusForTest(true);
Mockito.when(fos.getPos()).thenReturn((long)10);
// Commit offset covered by flushed data: sync then finished; reads proceed.
COMMIT_STATUS status=ctx.checkCommitInternal(5,ch,1,attr,false);
assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
ret=ctx.checkCommit(dfsClient,5,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,5));
status=ctx.checkCommitInternal(10,ch,1,attr,true);
assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
ret=ctx.checkCommit(dfsClient,10,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,10));
ConcurrentNavigableMap commits=ctx.getPendingCommitsForTest();
assertTrue(commits.size() == 0);
// Commit offset beyond flushed data: COMMIT_WAIT, but a read-triggered commit
// must NOT be queued (map stays empty) -- the reader gets JUKEBOX (retry later).
ret=ctx.checkCommit(dfsClient,11,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
assertEquals(0,commits.size());
assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,11));
// Offset 0 with a pending write: same WAIT/JUKEBOX behavior, still not queued.
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
assertEquals(0,commits.size());
assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,0));
// No pending writes left: commit finishes and reads succeed.
ctx.getPendingWritesForTest().remove(new OffsetRange(5,10));
ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0));
}
InternalCallVerifier BooleanVerifier
/** In AIX compatibility mode a commit beyond the flushed length finishes instead of waiting. */
@Test public void testCheckCommitAixCompatMode() throws IOException {
  DFSClient client = Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attrs = new Nfs3FileAttributes();
  HdfsDataOutputStream out = Mockito.mock(HdfsDataOutputStream.class);
  // Last constructor argument enables AIX compatibility mode.
  OpenFileCtx ctx = new OpenFileCtx(out, attrs, "/dumpFilePath", client,
      new IdUserGroup(new NfsConfiguration()), true);
  // Commit offset 5 beyond the flushed length (2): AIX mode reports FINISHED.
  Mockito.when(out.getPos()).thenReturn((long) 2);
  Assert.assertTrue(ctx.checkCommitInternal(5, null, 1, attrs, false) == COMMIT_STATUS.COMMIT_FINISHED);
  // Once enough data is flushed (10 >= 5), a real sync is still required.
  Mockito.when(out.getPos()).thenReturn((long) 10);
  Assert.assertTrue(ctx.checkCommitInternal(5, null, 1, attrs, false) == COMMIT_STATUS.COMMIT_DO_SYNC);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * NameNode.Feature's first layout feature must support all non-reserved
 * common features and directly follow the last common feature's version.
 */
@Test public void testNameNodeFeature(){
  final LayoutFeature first = NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
  final int firstVersion = first.getInfo().getLayoutVersion();
  assertTrue(NameNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE, firstVersion));
  // Layout versions decrease: the next feature is the previous version minus one.
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1, firstVersion);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test to make sure DataNode.Feature support previous features
*/
@Test public void testDataNodeFeature(){
final LayoutFeature first=DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
assertTrue(DataNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,first.getInfo().getLayoutVersion()));
assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,first.getInfo().getLayoutVersion());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** A CheckpointSignature must survive a round trip through its protobuf form. */
@Test public void testConvertCheckpointSignature(){
  CheckpointSignature original = new CheckpointSignature(getStorageInfo(NodeType.NAME_NODE), "bpid", 100, 1);
  CheckpointSignatureProto wire = PBHelper.convert(original);
  CheckpointSignature restored = PBHelper.convert(wire);
  // Field-by-field comparison (CheckpointSignature equality is not relied on).
  assertEquals(original.getBlockpoolID(), restored.getBlockpoolID());
  assertEquals(original.getClusterID(), restored.getClusterID());
  assertEquals(original.getCTime(), restored.getCTime());
  assertEquals(original.getCurSegmentTxId(), restored.getCurSegmentTxId());
  assertEquals(original.getLayoutVersion(), restored.getLayoutVersion());
  assertEquals(original.getMostRecentCheckpointTxId(), restored.getMostRecentCheckpointTxId());
  assertEquals(original.getNamespaceID(), restored.getNamespaceID());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A RecoveringBlock must survive a round trip through its protobuf form:
 * same block and the same datanode locations in order.
 *
 * Fixed: the location loop previously compared dnInfo[0] with dnInfo1[0] on
 * every iteration, so a mismatch in any replica beyond the first was never
 * detected; it now compares index i.
 */
@Test public void testConvertRecoveringBlock(){
  DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
  RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
  RecoveringBlockProto bProto = PBHelper.convert(b);
  RecoveringBlock b1 = PBHelper.convert(bProto);
  assertEquals(b.getBlock(), b1.getBlock());
  DatanodeInfo[] dnInfo1 = b1.getLocations();
  assertEquals(dnInfo.length, dnInfo1.length);
  for (int i = 0; i < dnInfo.length; i++) {
    compare(dnInfo[i], dnInfo1[i]); // was dnInfo[0], dnInfo1[0]
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A RemoteEditLogManifest must survive a round trip through its protobuf
 * form, preserving every log segment in order.
 *
 * Improvement: use parameterized List&lt;RemoteEditLog&gt; instead of raw
 * List/ArrayList so logs.get(i) is typed and no unchecked warnings arise.
 */
@Test public void testConvertRemoteEditLogManifest(){
  List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
  logs.add(new RemoteEditLog(1, 10));
  logs.add(new RemoteEditLog(11, 20));
  RemoteEditLogManifest m = new RemoteEditLogManifest(logs);
  RemoteEditLogManifestProto mProto = PBHelper.convert(m);
  RemoteEditLogManifest m1 = PBHelper.convert(mProto);
  List<RemoteEditLog> logs1 = m1.getLogs();
  assertEquals(logs.size(), logs1.size());
  for (int i = 0; i < logs.size(); i++) {
    compare(logs.get(i), logs1.get(i));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a NamenodeRegistration through protobuf and verifies that all
 * registration fields (addresses, storage info, role, version data) survive.
 */
@Test public void testConvertNamenodeRegistration(){
  StorageInfo storage=getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration before=new NamenodeRegistration("address:999","http:1000",storage,NamenodeRole.NAMENODE);
  NamenodeRegistrationProto wireForm=PBHelper.convert(before);
  NamenodeRegistration after=PBHelper.convert(wireForm);
  assertEquals(before.getAddress(),after.getAddress());
  assertEquals(before.getClusterID(),after.getClusterID());
  assertEquals(before.getCTime(),after.getCTime());
  assertEquals(before.getHttpAddress(),after.getHttpAddress());
  assertEquals(before.getLayoutVersion(),after.getLayoutVersion());
  assertEquals(before.getNamespaceID(),after.getNamespaceID());
  assertEquals(before.getRegistrationID(),after.getRegistrationID());
  assertEquals(before.getRole(),after.getRole());
  assertEquals(before.getVersion(),after.getVersion());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips an ExtendedBlock through protobuf twice: once with the default
 * block id and once with a negative id, asserting equality each time.
 */
@Test public void testConvertExtendedBlock(){
  ExtendedBlock block=getExtendedBlock();
  ExtendedBlockProto wireForm=PBHelper.convert(block);
  ExtendedBlock restored=PBHelper.convert(wireForm);
  assertEquals(block,restored);
  // A negative block id must also survive the conversion.
  block.setBlockId(-1);
  wireForm=PBHelper.convert(block);
  restored=PBHelper.convert(wireForm);
  assertEquals(block,restored);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a namenode StorageInfo through protobuf and checks that all
 * four fields are preserved. (The method-name typo is kept; renaming a test
 * method would change the externally visible test id.)
 */
@Test public void testConvertStoragInfo(){
  StorageInfo before=getStorageInfo(NodeType.NAME_NODE);
  StorageInfoProto wireForm=PBHelper.convert(before);
  StorageInfo after=PBHelper.convert(wireForm,NodeType.NAME_NODE);
  assertEquals(before.getClusterID(),after.getClusterID());
  assertEquals(before.getCTime(),after.getCTime());
  assertEquals(before.getLayoutVersion(),after.getLayoutVersion());
  assertEquals(before.getNamespaceID(),after.getNamespaceID());
}
InternalCallVerifier EqualityVerifier
/**
 * Checks that converting a Text to a String and constructing a new Text
 * from that String yields an object equal to the original.
 */
@Test public void testConvertText(){
  Text original=new Text("abc".getBytes());
  String asString=original.toString();
  Text rebuilt=new Text(asString);
  assertEquals(original,rebuilt);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a NamespaceInfo through protobuf, delegating the bulk of the
 * field checks to compare() and additionally asserting the block-pool id
 * and build version explicitly.
 */
@Test public void testConvertNamespaceInfo(){
  NamespaceInfo before=new NamespaceInfo(37,"clusterID","bpID",2300);
  NamespaceInfoProto wireForm=PBHelper.convert(before);
  NamespaceInfo after=PBHelper.convert(wireForm);
  compare(before,after);
  assertEquals(before.getBlockPoolID(),after.getBlockPoolID());
  assertEquals(before.getBuildVersion(),after.getBuildVersion());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a DatanodeRegistration (including its storage info and
 * exported block keys) through protobuf and compares every component.
 */
@Test public void testConvertDatanodeRegistration(){
  DatanodeID dnId=DFSTestUtil.getLocalDatanodeID();
  BlockKey[] extraKeys=new BlockKey[]{getBlockKey(2),getBlockKey(3)};
  ExportedBlockKeys exportedKeys=new ExportedBlockKeys(true,9,10,getBlockKey(1),extraKeys);
  DatanodeRegistration before=new DatanodeRegistration(dnId,new StorageInfo(NodeType.DATA_NODE),exportedKeys,"3.0.0");
  DatanodeRegistrationProto wireForm=PBHelper.convert(before);
  DatanodeRegistration after=PBHelper.convert(wireForm);
  compare(before.getStorageInfo(),after.getStorageInfo());
  compare(before.getExportedKeys(),after.getExportedKeys());
  compare(before,after);
  assertEquals(before.getSoftwareVersion(),after.getSoftwareVersion());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a DNA_TRANSFER BlockCommand through protobuf and verifies the
 * action, the block list, and the per-block datanode target arrays all
 * survive the conversion.
 */
@Test public void testConvertBlockCommand(){
  Block[] origBlocks=new Block[]{new Block(21),new Block(22)};
  // Two blocks with one and two transfer targets respectively.
  DatanodeInfo[][] origTargets=new DatanodeInfo[][]{new DatanodeInfo[1],new DatanodeInfo[2]};
  origTargets[0][0]=DFSTestUtil.getLocalDatanodeInfo();
  origTargets[1][0]=DFSTestUtil.getLocalDatanodeInfo();
  origTargets[1][1]=DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs={{"s00"},{"s10","s11"}};
  StorageType[][] storageTypes={{StorageType.DEFAULT},{StorageType.DEFAULT,StorageType.DEFAULT}};
  BlockCommand original=new BlockCommand(DatanodeProtocol.DNA_TRANSFER,"bp1",origBlocks,origTargets,storageTypes,storageIDs);
  BlockCommandProto wireForm=PBHelper.convert(original);
  BlockCommand restored=PBHelper.convert(wireForm);
  assertEquals(original.getAction(),restored.getAction());
  assertEquals(original.getBlocks().length,restored.getBlocks().length);
  Block[] restoredBlocks=restored.getBlocks();
  for (int blockIdx=0; blockIdx < origBlocks.length; blockIdx++) {
    assertEquals(origBlocks[blockIdx],restoredBlocks[blockIdx]);
  }
  DatanodeInfo[][] restoredTargets=restored.getTargets();
  assertEquals(origTargets.length,restoredTargets.length);
  for (int blockIdx=0; blockIdx < origTargets.length; blockIdx++) {
    DatanodeInfo[] expectedRow=origTargets[blockIdx];
    DatanodeInfo[] actualRow=restoredTargets[blockIdx];
    assertEquals(expectedRow.length,actualRow.length);
    for (int targetIdx=0; targetIdx < expectedRow.length; targetIdx++) {
      compare(expectedRow[targetIdx],actualRow[targetIdx]);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips three ACL entries through their protobuf form. e3 is built
 * without an explicit permission; the expected array pairs it with an
 * otherwise-identical entry carrying FsAction.NONE, so the test asserts that
 * the conversion materializes a missing permission as NONE.
 */
@Test public void testAclEntryProto(){
AclEntry e1=new AclEntry.Builder().setName("test").setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT).setType(AclEntryType.OTHER).build();
AclEntry e2=new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
// e3 deliberately omits setPermission().
AclEntry e3=new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setName("test").build();
AclEntry[] expected=new AclEntry[]{e1,e2,new AclEntry.Builder().setScope(e3.getScope()).setType(e3.getType()).setName(e3.getName()).setPermission(FsAction.NONE).build()};
AclEntry[] actual=Lists.newArrayList(PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists.newArrayList(e1,e2,e3)))).toArray(new AclEntry[0]);
Assert.assertArrayEquals(expected,actual);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a BlocksWithLocations container through protobuf and compares
 * each contained BlockWithLocations against its original.
 */
@Test public void testConvertBlocksWithLocations(){
  BlockWithLocations[] entries=new BlockWithLocations[]{getBlockWithLocations(1),getBlockWithLocations(2)};
  BlocksWithLocations original=new BlocksWithLocations(entries);
  BlocksWithLocationsProto wireForm=PBHelper.convert(original);
  BlocksWithLocations restored=PBHelper.convert(wireForm);
  BlockWithLocations[] expected=original.getBlocks();
  BlockWithLocations[] actual=restored.getBlocks();
  assertEquals(expected.length,actual.length);
  for (int idx=0; idx < expected.length; idx++) {
    compare(expected[idx],actual[idx]);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a BlockRecoveryCommand containing two recovering blocks
 * through protobuf. Checks the proto preserves block ids in order, then
 * compares the reconstructed command's blocks, string join, and toString()
 * against the original.
 */
@Test public void testConvertBlockRecoveryCommand(){
DatanodeInfo di1=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo di2=DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] dnInfo=new DatanodeInfo[]{di1,di2};
List blks=ImmutableList.of(new RecoveringBlock(getExtendedBlock(1),dnInfo,3),new RecoveringBlock(getExtendedBlock(2),dnInfo,3));
BlockRecoveryCommand cmd=new BlockRecoveryCommand(blks);
BlockRecoveryCommandProto proto=PBHelper.convert(cmd);
// The proto must keep the blocks in insertion order with their ids intact.
assertEquals(1,proto.getBlocks(0).getBlock().getB().getBlockId());
assertEquals(2,proto.getBlocks(1).getBlock().getB().getBlockId());
BlockRecoveryCommand cmd2=PBHelper.convert(proto);
List cmd2Blks=Lists.newArrayList(cmd2.getRecoveringBlocks());
assertEquals(blks.get(0).getBlock(),cmd2Blks.get(0).getBlock());
assertEquals(blks.get(1).getBlock(),cmd2Blks.get(1).getBlock());
// Joined string form and toString() give a whole-object equality check.
assertEquals(Joiner.on(",").join(blks),Joiner.on(",").join(cmd2Blks));
assertEquals(cmd.toString(),cmd2.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Starts a MiniJournalCluster, verifies the quorum URI lists three journal
 * node addresses, and checks that journal node 0 was configured with the
 * expected edits directory under the test base dir. Always shuts the
 * cluster down.
 */
@Test public void testStartStop() throws IOException {
Configuration conf=new Configuration();
MiniJournalCluster c=new MiniJournalCluster.Builder(conf).build();
try {
URI uri=c.getQuorumJournalURI("myjournal");
// The authority is a semicolon-separated list of the three JN addresses.
String[] addrs=uri.getAuthority().split(";");
assertEquals(3,addrs.length);
JournalNode node=c.getJournalNode(0);
String dir=node.getConf().get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY);
assertEquals(new File(MiniDFSCluster.getBaseDirectory() + "journalnode-0").getAbsolutePath(),dir);
}
finally {
c.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Starts a NN on shared quorum-journal storage, copies its image into a
 * second NN directory, then starts a second NN on that copy. Once the
 * second NN takes over as writer, the original (now fenced) NN must fail
 * when it tries to write further edits.
 */
@Test(timeout=30000) public void testNewNamenodeTakesOverWriter() throws Exception {
File nn1Dir=new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
File nn2Dir=new File(MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn2");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nn1Dir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI("myjournal").toString());
// First start formats the name dir and journal; shut down so the image can
// be copied before restarting.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
cluster.shutdown();
try {
FileUtil.fullyDelete(nn2Dir);
FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build();
cluster.getFileSystem().mkdirs(TEST_PATH);
// Bring up the second NN on the copied image; it becomes the new writer.
Configuration conf2=new Configuration();
conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nn2Dir.getAbsolutePath());
conf2.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI("myjournal").toString());
MiniDFSCluster cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build();
try {
assertTrue(cluster2.getFileSystem().exists(TEST_PATH));
}
finally {
cluster2.shutdown();
}
// The fenced first NN must now fail to write edits.
try {
cluster.getFileSystem().mkdirs(new Path("/x"));
fail("Did not abort trying to write to a fenced NN");
}
catch ( RemoteException re) {
GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage",re);
}
}
finally {
// BUG FIX: the finally block was empty, leaking the restarted cluster
// (and its fenced NN process) whether the test passed or failed.
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that edits logged to the quorum journal survive NN restarts:
 * directories created before each restart must still exist afterwards.
 */
@Test(timeout=30000) public void testLogAndRestart() throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,mjc.getQuorumJournalURI("myjournal").toString());
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).build();
try {
// Write, restart, and confirm the edit was replayed.
cluster.getFileSystem().mkdirs(TEST_PATH);
cluster.restartNameNode();
assertTrue(cluster.getFileSystem().exists(TEST_PATH));
// A second write/restart cycle must preserve both paths.
cluster.getFileSystem().mkdirs(TEST_PATH_2);
cluster.restartNameNode();
assertTrue(cluster.getFileSystem().exists(TEST_PATH));
assertTrue(cluster.getFileSystem().exists(TEST_PATH_2));
}
finally {
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises epoch allocation from a single thread: each new
 * QuorumJournalManager must receive a strictly increasing epoch, first with
 * well-behaved loggers and then with a fault-injecting logger factory that
 * may require retries.
 */
@Test public void testSingleThreaded() throws IOException {
Configuration conf=new Configuration();
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
URI uri=cluster.getQuorumJournalURI(JID);
QuorumJournalManager qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO);
try {
qjm.format(FAKE_NSINFO);
}
finally {
qjm.close();
}
try {
// With healthy loggers, epochs are handed out sequentially: 1..5.
for (int i=0; i < 5; i++) {
qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO);
try {
qjm.createNewUniqueEpoch();
assertEquals(i + 1,qjm.getLoggerSetForTests().getEpoch());
}
finally {
qjm.close();
}
}
long prevEpoch=5;
// With injected faults, an attempt may throw; retry until one succeeds
// and check the obtained epoch still increases monotonically.
for (int i=0; i < 20; i++) {
long newEpoch=-1;
while (true) {
qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO,new FaultyLoggerFactory());
try {
qjm.createNewUniqueEpoch();
newEpoch=qjm.getLoggerSetForTests().getEpoch();
break;
}
catch ( IOException ioe) {
}
finally {
qjm.close();
}
}
LOG.info("Created epoch " + newEpoch);
assertTrue("New epoch " + newEpoch + " should be greater than previous "+ prevEpoch,newEpoch > prevEpoch);
prevEpoch=newEpoch;
}
}
finally {
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that, if the remote node gets unsynchronized (eg some edits were
 * missed or the node rebooted), the client stops sending edits until
 * the next roll. Test for HDFS-3726.
 */
@Test public void testStopSendingEditsWhenOutOfSync() throws Exception {
// Make the first journal() RPC fail, putting the channel out of sync.
Mockito.doThrow(new IOException("injected error")).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
try {
ch.sendEdits(1L,1L,1,FAKE_DATA).get();
fail("Injected JOOSE did not cause sendEdits() to throw");
}
catch ( ExecutionException ee) {
GenericTestUtils.assertExceptionContains("injected",ee);
}
Mockito.verify(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
assertTrue(ch.isOutOfSync());
// While out of sync, further sendEdits() must fail locally...
try {
ch.sendEdits(1L,2L,1,FAKE_DATA).get();
fail("sendEdits() should throw until next roll");
}
catch ( ExecutionException ee) {
GenericTestUtils.assertExceptionContains("disabled until next roll",ee.getCause());
}
// ...and the edits must never reach the proxy; heartbeats still flow.
Mockito.verify(mockProxy,Mockito.never()).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(2L),Mockito.eq(1),Mockito.same(FAKE_DATA));
Mockito.verify(mockProxy).heartbeat(Mockito.any());
// Rolling to a new segment clears the out-of-sync flag and re-enables sends.
ch.startLogSegment(3L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
assertFalse(ch.isOutOfSync());
ch.sendEdits(3L,3L,1,FAKE_DATA).get();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test that, once the queue eclipses the configured size limit,
 * calls to journal more data are rejected with
 * LoggerTooFarBehindException, and that the queue drains once the
 * delayed RPC is allowed to proceed.
 */
@Test public void testQueueLimiting() throws Exception {
// Stall the first journal() RPC so subsequent edits pile up in the queue.
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
int numToQueue=LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
for (int i=1; i <= numToQueue; i++) {
ch.sendEdits(1L,(long)i,1,FAKE_DATA);
}
assertEquals(LIMIT_QUEUE_SIZE_BYTES,ch.getQueuedEditsSize());
// The next send exceeds the limit and must fail with
// LoggerTooFarBehindException (any other cause is rethrown).
try {
ch.sendEdits(1L,numToQueue + 1,1,FAKE_DATA).get(1,TimeUnit.SECONDS);
fail("Did not fail to queue more calls after queue was full");
}
catch ( ExecutionException ee) {
if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
throw ee;
}
}
// Release the stalled RPC and wait for the queue to drain completely.
delayer.proceed();
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
return ch.getQueuedEditsSize() == 0;
}
}
,10,1000);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Sets up two of the nodes to each drop a single RPC, at all
 * possible combinations of RPCs. This may result in the
 * active writer failing to write. After this point, a new writer
 * should be able to recover and continue writing without
 * data loss.
 */
@Test public void testRecoverAfterDoubleFailures() throws Exception {
final long MAX_IPC_NUMBER=determineMaxIpcNumber();
// Try every (failA, failB) pair of IPC indices at which loggers 0 and 1
// will each drop one call.
for (int failA=1; failA <= MAX_IPC_NUMBER; failA++) {
for (int failB=1; failB <= MAX_IPC_NUMBER; failB++) {
String injectionStr="(" + failA + ", "+ failB+ ")";
LOG.info("\n\n-------------------------------------------\n" + "Beginning test, failing at " + injectionStr + "\n"+ "-------------------------------------------\n\n");
// Fresh cluster per combination so failures don't accumulate.
MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
QuorumJournalManager qjm=null;
try {
qjm=createInjectableQJM(cluster);
qjm.format(FAKE_NSINFO);
List loggers=qjm.getLoggerSetForTests().getLoggersForTests();
failIpcNumber(loggers.get(0),failA);
failIpcNumber(loggers.get(1),failB);
int lastAckedTxn=doWorkload(cluster,qjm);
// A failed majority can legitimately stop the workload early.
if (lastAckedTxn < 6) {
LOG.info("Failed after injecting failures at " + injectionStr + ". This is expected since we injected a failure in the "+ "majority.");
}
qjm.close();
qjm=null;
// A brand-new writer must recover at least everything that was acked,
// then be able to write a further segment.
qjm=createInjectableQJM(cluster);
long lastRecoveredTxn=QJMTestUtil.recoverAndReturnLastTxn(qjm);
assertTrue(lastRecoveredTxn >= lastAckedTxn);
writeSegment(cluster,qjm,lastRecoveredTxn + 1,3,true);
}
catch ( Throwable t) {
// Wrap so the failing injection point is visible in the report.
throw new RuntimeException("Test failed with injection: " + injectionStr,t);
}
finally {
cluster.shutdown();
cluster=null;
IOUtils.closeStream(qjm);
qjm=null;
}
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Exercises QuorumCall response counting and waitFor() semantics across
 * three futures: one success, one exception, another success. Also checks
 * that waiting for more responses than will ever arrive times out.
 */
@Test(timeout=10000) public void testQuorums() throws Exception {
Map> futures=ImmutableMap.of("f1",SettableFuture.create(),"f2",SettableFuture.create(),"f3",SettableFuture.create());
QuorumCall q=QuorumCall.create(futures);
assertEquals(0,q.countResponses());
// One success: both "any 1 response" and "1 success" waits must return.
futures.get("f1").set("first future");
q.waitFor(1,0,0,100000,"test");
q.waitFor(0,1,0,100000,"test");
assertEquals(1,q.countResponses());
// An exception also counts as a response.
futures.get("f2").setException(new Exception("error"));
assertEquals(2,q.countResponses());
futures.get("f3").set("second future");
q.waitFor(3,0,100,100000,"test");
q.waitFor(0,2,100,100000,"test");
assertEquals(3,q.countResponses());
// Only the successful futures appear in the results map.
assertEquals("f1=first future,f3=second future",Joiner.on(",").withKeyValueSeparator("=").join(new TreeMap(q.getResults())));
// Waiting for a fourth success can never be satisfied and must time out.
try {
q.waitFor(0,4,100,10,"test");
fail("Didn't time out waiting for more responses than came back");
}
catch ( TimeoutException te) {
}
}
TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Per-test fixture: builds a MiniJournalCluster with client retries
 * disabled, creates a spying QJM over it, formats and recovers the journal,
 * and verifies the initial epoch is 1.
 */
@Before public void setup() throws Exception {
conf=new Configuration();
// Fail fast on connection problems rather than retrying in tests.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
cluster=new MiniJournalCluster.Builder(conf).build();
qjm=createSpyingQJM();
spies=qjm.getLoggerSetForTests().getLoggersForTests();
qjm.format(QJMTestUtil.FAKE_NSINFO);
qjm.recoverUnfinalizedSegments();
assertEquals(1,qjm.getLoggerSetForTests().getEpoch());
}
InternalCallVerifier BooleanVerifier
/**
 * Writes five one-txn segments, plants extra paxos and in-progress/empty
 * files, then verifies purgeLogsOlderThan(3) removes segments and paxos
 * files below txid 3 while keeping everything at or above it.
 */
@Test public void testPurgeLogs() throws Exception {
for (int txid=1; txid <= 5; txid++) {
writeSegment(cluster,qjm,txid,1,true);
}
File curDir=cluster.getCurrentDir(0,JID);
// All five finalized segments should exist before the purge.
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getFinalizedEditsFileName(1,1),NNStorage.getFinalizedEditsFileName(2,2),NNStorage.getFinalizedEditsFileName(3,3),NNStorage.getFinalizedEditsFileName(4,4),NNStorage.getFinalizedEditsFileName(5,5));
File paxosDir=new File(curDir,"paxos");
GenericTestUtils.assertExists(paxosDir);
// Plant paxos recovery metadata files for txids 1 and 3.
assertTrue(new File(paxosDir,"1").createNewFile());
assertTrue(new File(paxosDir,"3").createNewFile());
GenericTestUtils.assertGlobEquals(paxosDir,"\\d+","1","3");
// Plant stale in-progress artifacts that the purge should also clean up.
assertTrue(new File(curDir,"edits_inprogress_0000000000000000001.epoch=140").createNewFile());
assertTrue(new File(curDir,"edits_inprogress_0000000000000000002.empty").createNewFile());
qjm.purgeLogsOlderThan(3);
waitForAllPendingCalls(qjm.getLoggerSetForTests());
// Only segments and paxos files for txid >= 3 remain.
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getFinalizedEditsFileName(3,3),NNStorage.getFinalizedEditsFileName(4,4),NNStorage.getFinalizedEditsFileName(5,5));
GenericTestUtils.assertGlobEquals(paxosDir,"\\d+","3");
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies what a reader QJM sees while a writer QJM is active: nothing
 * before any segment exists, only finalized segments while a segment is
 * still in progress, and both segments once the second is finalized.
 */
@Test public void testReaderWhileAnotherWrites() throws Exception {
QuorumJournalManager readerQjm=closeLater(createSpyingQJM());
List streams=Lists.newArrayList();
// No segments yet: the reader gets no input streams.
readerQjm.selectInputStreams(streams,0,false);
assertEquals(0,streams.size());
writeSegment(cluster,qjm,1,3,true);
readerQjm.selectInputStreams(streams,0,false);
try {
// One finalized segment covering txids 1..3.
assertEquals(1,streams.size());
EditLogInputStream stream=streams.get(0);
assertEquals(1,stream.getFirstTxId());
assertEquals(3,stream.getLastTxId());
verifyEdits(streams,1,3);
// The stream is exhausted after txid 3.
assertNull(stream.readOp());
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
streams.clear();
}
// Start (but do not finalize) a second segment: the reader must still see
// only the finalized first segment.
writeSegment(cluster,qjm,4,3,false);
readerQjm.selectInputStreams(streams,0,false);
try {
assertEquals(1,streams.size());
EditLogInputStream stream=streams.get(0);
assertEquals(1,stream.getFirstTxId());
assertEquals(3,stream.getLastTxId());
verifyEdits(streams,1,3);
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
streams.clear();
}
// After finalizing txids 4..6, both segments become readable.
qjm.finalizeLogSegment(4,6);
readerQjm.selectInputStreams(streams,0,false);
try {
assertEquals(2,streams.size());
assertEquals(4,streams.get(1).getFirstTxId());
assertEquals(6,streams.get(1).getLastTxId());
verifyEdits(streams,1,6);
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
streams.clear();
}
}
InternalCallVerifier BooleanVerifier
/**
 * A freshly created quorum journal manager reports no data; after format()
 * it must report that data exists.
 */
@Test public void testFormat() throws Exception {
  QuorumJournalManager manager=closeLater(new QuorumJournalManager(conf,cluster.getQuorumJournalURI("testFormat-jid"),FAKE_NSINFO));
  assertFalse(manager.hasSomeData());
  manager.format(FAKE_NSINFO);
  assertTrue(manager.hasSomeData());
}
InternalCallVerifier BooleanVerifier
/**
 * Stubs all three spy loggers to accept startLogSegment, opens a quorum
 * output stream, and checks its generated report contains no markup
 * (i.e. is plain text, suitable for logging).
 */
@Test public void testQuorumOutputStreamReport() throws Exception {
futureReturns(null).when(spyLoggers.get(0)).startLogSegment(Mockito.anyLong(),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(1)).startLogSegment(Mockito.anyLong(),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
futureReturns(null).when(spyLoggers.get(2)).startLogSegment(Mockito.anyLong(),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
QuorumOutputStream os=(QuorumOutputStream)qjm.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
String report=os.generateReport();
// "<" would indicate HTML/XML leaked into the human-readable report.
Assert.assertFalse("Report should be plain text",report.contains("<"));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that, if the writer crashes at the very beginning of a segment,
 * before any transactions are written, that the next newEpoch() call
 * returns the prior segment txid as its most recent segment.
 */
@Test(timeout=10000) public void testNewEpochAtBeginningOfSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO,1);
// Write and finalize a first segment (txids 1..2)...
journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2),1,1,2,QJMTestUtil.createTxnData(1,2));
journal.finalizeLogSegment(makeRI(3),1,2);
// ...then start a new segment at txid 3 but write nothing to it.
journal.startLogSegment(makeRI(4),3,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
NewEpochResponseProto resp=journal.newEpoch(FAKE_NSINFO,2);
// The empty in-progress segment is ignored; the prior segment (txid 1) is
// reported as the most recent.
assertEquals(1,resp.getLastSegmentTxId());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises epoch promises: promises must increase monotonically, a repeat
 * promise of the same epoch is rejected, and write RPCs carrying a stale
 * epoch are rejected with an explanatory message.
 */
@Test(timeout=10000) public void testEpochHandling() throws Exception {
assertEquals(0,journal.getLastPromisedEpoch());
NewEpochResponseProto newEpoch=journal.newEpoch(FAKE_NSINFO,1);
// A fresh journal has no segments, so no last-segment txid is reported.
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(1,journal.getLastPromisedEpoch());
journal.newEpoch(FAKE_NSINFO,3);
// NOTE(review): this re-asserts the response captured from the epoch-1
// call; the epoch-3 call's return value above is discarded. Confirm the
// repeat assertion on the stale object is intentional.
assertFalse(newEpoch.hasLastSegmentTxId());
assertEquals(3,journal.getLastPromisedEpoch());
// Promising the same epoch twice must fail.
try {
journal.newEpoch(FAKE_NSINFO,3);
fail("Should have failed to promise same epoch twice");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Proposed epoch 3 <= last promise 3",ioe);
}
// Write RPCs from the superseded epoch 1 must be rejected.
try {
journal.startLogSegment(makeRI(1),12345L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Should have rejected call from prior epoch");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
}
try {
journal.journal(makeRI(1),12345L,100L,0,new byte[0]);
fail("Should have rejected call from prior epoch");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that format() clears the journal's cached epoch state: after
 * formatting with a new namespace, both the promised and writer epochs
 * drop back to 0 while the journal remains formatted.
 */
@Test(timeout=10000) public void testFormatResetsCachedValues() throws Exception {
journal.newEpoch(FAKE_NSINFO,12345L);
journal.startLogSegment(new RequestInfo(JID,12345L,1L,0L),1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertEquals(12345L,journal.getLastPromisedEpoch());
assertEquals(12345L,journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
// Close before reformatting with a different namespace.
journal.close();
journal.format(FAKE_NSINFO_2);
assertEquals(0,journal.getLastPromisedEpoch());
assertEquals(0,journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test whether JNs can correctly handle editlog that cannot be decoded:
 * garbage transactions are written under an older layout version, and the
 * journal must still report correct segment start/end txids both while the
 * segment is in progress and after it is finalized.
 */
@Test public void testScanEditLog() throws Exception {
// Use an older layout version so the JN must scan rather than decode.
journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
final int numTxns=5;
// CONSISTENCY FIX: use the declared numTxns constant instead of a
// duplicated literal 5, so changing the count stays in one place.
byte[] ops=QJMTestUtil.createGabageTxns(1,numTxns);
journal.journal(makeRI(2),1,1,numTxns,ops);
SegmentStateProto segmentState=journal.getSegmentInfo(1);
assertTrue(segmentState.getIsInProgress());
Assert.assertEquals(numTxns,segmentState.getEndTxId());
Assert.assertEquals(1,segmentState.getStartTxId());
// After finalization the same txid range must be reported, no longer
// marked in progress.
journal.finalizeLogSegment(makeRI(3),1,numTxns);
segmentState=journal.getSegmentInfo(1);
assertFalse(segmentState.getIsInProgress());
Assert.assertEquals(numTxns,segmentState.getEndTxId());
Assert.assertEquals(1,segmentState.getStartTxId());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that journal state (storage info, promised epoch, last segment)
 * survives closing the Journal and reconstructing it over the same
 * directory.
 */
@Test(timeout=10000) public void testRestartJournal() throws Exception {
journal.newEpoch(FAKE_NSINFO,1);
journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
journal.journal(makeRI(2),1,1,2,QJMTestUtil.createTxnData(1,2));
String storageString=journal.getStorage().toColonSeparatedString();
System.err.println("storage string: " + storageString);
journal.close();
// Reconstruct the journal over the same on-disk directory.
journal=new Journal(conf,TEST_LOG_DIR,JID,StartupOption.REGULAR,mockErrorReporter);
assertEquals(storageString,journal.getStorage().toColonSeparatedString());
assertEquals(1,journal.getLastPromisedEpoch());
// The in-progress segment that started at txid 1 is still remembered.
NewEpochResponseProtoOrBuilder newEpoch=journal.newEpoch(FAKE_NSINFO,2);
assertEquals(1,newEpoch.getLastSegmentTxId());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies the journal tracks the committed txid carried in each request's
 * RequestInfo: it stays 0 until a request reports a higher committed txid.
 */
@Test(timeout=10000) public void testMaintainCommittedTxId() throws Exception {
journal.newEpoch(FAKE_NSINFO,1);
journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// RequestInfo's last argument is the committed txid: first 0, then 3.
journal.journal(new RequestInfo(JID,1,2,0),1,1,3,QJMTestUtil.createTxnData(1,3));
assertEquals(0,journal.getCommittedTxnIdForTests());
journal.journal(new RequestInfo(JID,1,3,3),1,4,3,QJMTestUtil.createTxnData(4,6));
assertEquals(3,journal.getCommittedTxnIdForTests());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the JournalNode performs correctly as a Paxos
 * Acceptor process: prepare fails without an epoch, prepare/accept round
 * trips record segment state, and prepare/accept from a superseded epoch
 * are rejected.
 */
@Test(timeout=100000) public void testAcceptRecoveryBehavior() throws Exception {
// Paxos cannot run before the channel has established an epoch.
try {
ch.prepareRecovery(1L).get();
fail("Did not throw IllegalState when trying to run paxos without an epoch");
}
catch ( ExecutionException ise) {
GenericTestUtils.assertExceptionContains("bad epoch",ise);
}
ch.newEpoch(1).get();
ch.setEpoch(1);
// With no segment written yet, prepare returns an empty response.
PrepareRecoveryResponseProto prep=ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertFalse(prep.hasSegmentState());
ch.startLogSegment(1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L,1L,1,QJMTestUtil.createTxnData(1,1)).get();
// Now prepare reports the in-progress segment, but nothing accepted yet.
prep=ch.prepareRecovery(1L).get();
System.err.println("Prep: " + prep);
assertFalse(prep.hasAcceptedInEpoch());
assertTrue(prep.hasSegmentState());
ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
// NOTE(review): unlike every other channel call here, this newEpoch(2)
// result is not awaited with .get() — confirm the missing await is
// intentional (channel calls may still be ordered internally).
ch.newEpoch(2);
ch.setEpoch(2);
// A later-epoch prepare sees the value accepted in epoch 1.
prep=ch.prepareRecovery(1L).get();
assertEquals(1L,prep.getAcceptedInEpoch());
assertEquals(1L,prep.getSegmentState().getEndTxId());
// Roll the client's epoch back: both prepare and accept must be rejected.
ch.setEpoch(1);
try {
ch.prepareRecovery(1L).get();
fail("prepare from earlier epoch not rejected");
}
catch ( ExecutionException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
}
try {
ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
fail("accept from earlier epoch not rejected");
}
catch ( ExecutionException ioe) {
GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the JournalNode HTTP server: the /jmx page exposes JVM metrics,
 * /getJournal serves a finalized segment's bytes (layout-version header plus
 * the edit data), and an unknown segment txid yields HTTP 404.
 */
@Test(timeout=100000) public void testHttpServer() throws Exception {
String urlRoot=jn.getHttpServerURI();
String pageContents=DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
assertTrue("Bad contents: " + pageContents,pageContents.contains("Hadoop:service=JournalNode,name=JvmMetrics"));
// Write and finalize a small segment (txids 1..3) over IPC.
byte[] EDITS_DATA=QJMTestUtil.createTxnData(1,3);
IPCLoggerChannel ch=new IPCLoggerChannel(conf,FAKE_NSINFO,journalId,jn.getBoundIpcAddress());
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L,1,3,EDITS_DATA).get();
ch.finalizeLogSegment(1,3).get();
byte[] retrievedViaHttp=DFSTestUtil.urlGetBytes(new URL(urlRoot + "/getJournal?segmentTxId=1&jid=" + journalId));
// Served bytes = 4-byte layout version, 4 zero bytes, then the edit data.
byte[] expected=Bytes.concat(Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),(new byte[]{0,0,0,0}),EDITS_DATA);
assertArrayEquals(expected,retrievedViaHttp);
// A segment txid that does not exist must return 404.
URL badUrl=new URL(urlRoot + "/getJournal?segmentTxId=12345&jid=" + journalId);
HttpURLConnection connection=(HttpURLConnection)badUrl.openConnection();
try {
assertEquals(404,connection.getResponseCode());
}
finally {
connection.disconnect();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that each epoch transition reports the start txid of the most
 * recent segment: it stays 1 across finalization of the first segment, and
 * also after a new segment at txid 3 is started but has no edits yet.
 */
@Test(timeout=100000) public void testReturnsSegmentInfoAtEpochTransition() throws Exception {
ch.newEpoch(1).get();
ch.setEpoch(1);
ch.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
ch.sendEdits(1L,1,2,QJMTestUtil.createTxnData(1,2)).get();
// In-progress segment starting at txid 1 is reported.
NewEpochResponseProto response=ch.newEpoch(2).get();
ch.setEpoch(2);
assertEquals(1,response.getLastSegmentTxId());
// Still reported after finalizing that segment.
ch.finalizeLogSegment(1,2).get();
response=ch.newEpoch(3).get();
ch.setEpoch(3);
assertEquals(1,response.getLastSegmentTxId());
// An empty just-started segment at txid 3 does not change the answer.
ch.startLogSegment(3,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
response=ch.newEpoch(4).get();
ch.setEpoch(4);
assertEquals(1,response.getLastSegmentTxId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the JournalNodeInfo MXBean's JournalsStatus attribute: it must
 * match getJournalsStatus(), omit an unformatted journal, and report
 * Formatted=true (as JSON) after the journal is formatted — including for
 * a second, unformatted-on-construction cluster.
 */
@Test public void testJournalNodeMXBean() throws Exception {
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
String journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
// Before formatting, the nameservice must not appear in the status.
assertFalse(journalStatus.contains(NAMESERVICE));
final NamespaceInfo FAKE_NSINFO=new NamespaceInfo(12345,"mycluster","my-bp",0L);
jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
// Expected status is a JSON map: {NAMESERVICE: {"Formatted": "true"}}.
Map> jMap=new HashMap>();
Map infoMap=new HashMap();
infoMap.put("Formatted","true");
jMap.put(NAMESERVICE,infoMap);
assertEquals(JSON.toString(jMap),journalStatus);
// A new cluster built with format(false) must still report the journal
// as formatted from its on-disk state.
// NOTE(review): jCluster/jn are fields reassigned here — presumably a
// teardown method shuts the new cluster down; confirm.
jCluster=new MiniJournalCluster.Builder(new Configuration()).format(false).numJournalNodes(NUM_JN).build();
jn=jCluster.getJournalNode(0);
journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
assertEquals(jn.getJournalsStatus(),journalStatus);
jMap=new HashMap>();
infoMap=new HashMap();
infoMap.put("Formatted","true");
jMap.put(NAMESERVICE,infoMap);
assertEquals(JSON.toString(jMap),journalStatus);
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies DistributedFileSystem.addDelegationTokens: the first call issues
 * exactly one token (validated via checkTokenIdentifier), and a second call
 * with the same credentials issues none, leaving one token stored.
 */
@Test public void testAddDelegationTokensDFSApi() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createRemoteUser("JobTracker");
DistributedFileSystem dfs=cluster.getFileSystem();
Credentials creds=new Credentials();
final Token> tokens[]=dfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(1,tokens.length);
Assert.assertEquals(1,creds.numberOfTokens());
checkTokenIdentifier(ugi,tokens[0]);
// A repeat call finds the token already in creds and issues nothing new.
final Token> tokens2[]=dfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(0,tokens2.length);
Assert.assertEquals(1,creds.numberOfTokens());
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Verifies addDelegationTokens over the WebHDFS filesystem: one token is
 * issued on the first call (and is the same object stored in the
 * credentials), and a second call with the same credentials issues none.
 */
@Test public void testDelegationTokenWebHdfsApi() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
final String uri=WebHdfsFileSystem.SCHEME + "://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final UserGroupInformation ugi=UserGroupInformation.createUserForTesting("JobTracker",new String[]{"user"});
// Obtain the WebHDFS filesystem as the test user.
final WebHdfsFileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction(){
@Override public WebHdfsFileSystem run() throws Exception {
return (WebHdfsFileSystem)FileSystem.get(new URI(uri),config);
}
}
);
{
Credentials creds=new Credentials();
final Token> tokens[]=webhdfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(1,tokens.length);
Assert.assertEquals(1,creds.numberOfTokens());
// The returned token must be the very object stored in the credentials.
Assert.assertSame(tokens[0],creds.getAllTokens().iterator().next());
checkTokenIdentifier(ugi,tokens[0]);
final Token> tokens2[]=webhdfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(0,tokens2.length);
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the delegation token secret manager only runs when the
 * NN is out of safe mode. This is because the secret manager
 * has to log to the edit log, which should not be written in
 * safe mode. Regression test for HDFS-2579.
 */
@Test public void testDTManagerInSafeMode() throws Exception {
cluster.startDataNodes(config,1,true,StartupOption.REGULAR,null);
FileSystem fs=cluster.getFileSystem();
// Create some files so the NN has blocks to wait for on restart.
for (int i=0; i < 5; i++) {
DFSTestUtil.createFile(fs,new Path("/test-" + i),100,(short)1,1L);
}
// Long safemode extension keeps the NN in safe mode after restart.
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,500);
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,30000);
cluster.setWaitSafeMode(false);
cluster.restartNameNode();
NameNode nn=cluster.getNameNode();
assertTrue(nn.isInSafeMode());
DelegationTokenSecretManager sm=NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
assertFalse("Secret manager should not run in safe mode",sm.isRunning());
// Leaving safe mode starts the secret manager...
NameNodeAdapter.leaveSafeMode(nn);
assertTrue("Secret manager should start when safe mode is exited",sm.isRunning());
LOG.info("========= entering safemode again");
// ...and manually re-entering safe mode stops it again.
NameNodeAdapter.enterSafeMode(nn,false);
assertFalse("Secret manager should stop again when safe mode " + "is manually entered",sm.isRunning());
// With no extension, a restart comes up out of safe mode with the
// secret manager running.
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0);
cluster.setWaitSafeMode(true);
cluster.restartNameNode();
nn=cluster.getNameNode();
sm=NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
assertFalse(nn.isInSafeMode());
assertTrue(sm.isRunning());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testDelegationTokenWithDoAs() throws Exception {
final DistributedFileSystem dfs=cluster.getFileSystem();
final Credentials creds=new Credentials();
final Token> tokens[]=dfs.addDelegationTokens("JobTracker",creds);
Assert.assertEquals(1,tokens.length);
@SuppressWarnings("unchecked") final Token token=(Token)tokens[0];
final UserGroupInformation longUgi=UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
final UserGroupInformation shortUgi=UserGroupInformation.createRemoteUser("JobTracker");
longUgi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
try {
token.renew(config);
}
catch ( Exception e) {
Assert.fail("Could not renew delegation token for user " + longUgi);
}
return null;
}
}
);
shortUgi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
token.renew(config);
return null;
}
}
);
longUgi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
try {
token.cancel(config);
}
catch ( Exception e) {
Assert.fail("Could not cancel delegation token for user " + longUgi);
}
return null;
}
}
);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercise the DelegationTokenSecretManager lifecycle: only the designated
 * renewer may renew; an un-renewed token expires (the fixture presumably
 * configures a renew interval under the 6s sleep -- confirm in setup);
 * renewal revives it; and renewal past the max lifetime is rejected.
 */
@Test
public void testDelegationTokenSecretManager() throws Exception {
  Token<DelegationTokenIdentifier> token =
      generateDelegationToken("SomeUser", "JobTracker");
  // Renewal by anyone other than the token's renewer must be refused.
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // expected: "FakeRenewer" is not the designated renewer
  }
  dtSecretManager.renewToken(token, "JobTracker");
  // Decode the identifier so the password can be looked up directly.
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // The renew interval has elapsed, so the token is no longer valid.
  try {
    dtSecretManager.retrievePassword(identifier);
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // expected
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  // Past the max lifetime even the valid renewer cannot renew.
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // expected
  }
}
InternalCallVerifier EqualityVerifier
@Test(timeout=20000) public void testDelegationTokenWithRealUser() throws IOException {
try {
Token>[] tokens=proxyUgi.doAs(new PrivilegedExceptionAction[]>(){
@Override public Token>[] run() throws IOException {
return cluster.getFileSystem().addDelegationTokens("RenewerUser",null);
}
}
);
DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
byte[] tokenId=tokens[0].getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
Assert.assertEquals(identifier.getUser().getUserName(),PROXY_USER);
Assert.assertEquals(identifier.getUser().getRealUser().getUserName(),REAL_USER);
}
catch ( InterruptedException e) {
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify WebHDFS operations performed while impersonating a proxy user:
 * the home directory, the owner of a newly created file, and the owner
 * after append must all reflect PROXY_USER.
 */
@Test(timeout=5000) public void testWebHdfsDoAs() throws Exception {
WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
final WebHdfsFileSystem webhdfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi,config,WebHdfsFileSystem.SCHEME);
final Path root=new Path("/");
// World-writable root so the proxy user can create files under it.
cluster.getFileSystem().setPermission(root,new FsPermission((short)0777));
// Swap the filesystem's internal UGI for the proxy user via reflection.
Whitebox.setInternalState(webhdfs,"ugi",proxyUgi);
{
// Home directory must be resolved for the proxy user, not the original ugi.
Path responsePath=webhdfs.getHomeDirectory();
WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER,responsePath.toString());
}
final Path f=new Path("/testWebHdfsDoAs/a.txt");
{
// A file created through the proxied filesystem is owned by the proxy user.
FSDataOutputStream out=webhdfs.create(f);
out.write("Hello, webhdfs user!".getBytes());
out.close();
final FileStatus status=webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
Assert.assertEquals(PROXY_USER,status.getOwner());
}
{
// Appending does not change the owner.
final FSDataOutputStream out=webhdfs.append(f);
out.write("\nHello again!".getBytes());
out.close();
final FileStatus status=webhdfs.getFileStatus(f);
WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
WebHdfsTestUtil.LOG.info("status.getLen() =" + status.getLen());
Assert.assertEquals(PROXY_USER,status.getOwner());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * This test writes a file and gets the block locations without closing the
 * file, and tests the block token in the last block. Block token is verified
 * by ensuring it is of correct kind.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testBlockTokenInLastLocatedBlock() throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  // Small block size so 1000 bytes spans more than one block.
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // The file stays open; poll until the NN reports a last located block.
    LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
    }
    Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock().getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
@Test
public void testBlockTokenRpcLeak() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  // fd counting relies on the per-process fd directory; skip where absent.
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server server = createMockDatanode(sm, token, conf);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // A proxy to an unreachable address, stopped only at the very end;
  // NOTE(review): presumably present to exercise fd behavior of an
  // un-connected client -- confirm against HDFS-1965 discussion.
  ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
      ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
      new InetSocketAddress("1.1.1.1", 1),
      UserGroupInformation.createRemoteUser("junk"), conf,
      NetUtils.getDefaultSocketFactory(conf));
  ClientDatanodeProtocol proxy = null;
  int fdsAtStart = countOpenFileDescriptors();
  try {
    long endTime = Time.now() + 3000;
    // Hammer proxy creation for ~3s; each iteration must release its socket.
    while (Time.now() < endTime) {
      proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
          false, fakeBlock);
      assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    // Allow a little slack for unrelated fds, but a leak of one fd per
    // iteration would blow far past 50.
    int fdsAtEnd = countOpenFileDescriptors();
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that a client can authenticate to a (mock) datanode RPC server
 * using only a block access token as its credential and successfully call
 * getReplicaVisibleLength.
 */
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server server = createMockDatanode(sm, token, conf);
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  // The UGI carries only the block token -- no Kerberos ticket.
  final UserGroupInformation ticket =
      UserGroupInformation.createRemoteUser(block3.toString());
  ticket.addToken(token);
  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Start a 3-DN cluster with skewed utilization (50%/70%/0%), inject the
 * block distributions, add a fresh empty datanode, and run the Balancer
 * with one datanode's hostname in the node set passed to Parameters; the
 * run must still exit SUCCESS.
 * NOTE(review): the third Parameters argument appears to be the
 * excluded-nodes set -- confirm against Balancer.Parameters.
 */
@Test(timeout = 100000)
public void testUnknownDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  initConf(conf);
  long distribution[] = new long[]{50 * CAPACITY / 100, 70 * CAPACITY / 100, 0 * CAPACITY / 100};
  long capacities[] = new long[]{CAPACITY, CAPACITY, CAPACITY};
  String racks[] = new String[]{RACK0, RACK1, RACK1};
  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }
  final long totalUsedSpace = sum(distribution);
  ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace, (short) numDatanodes);
  Block[][] blocksDN = distributeBlocks(blocks, (short) (numDatanodes - 1), distribution);
  // Don't wait on safemode thresholds for the unformatted restart.
  conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
      .format(false).racks(racks).simulatedCapacities(capacities).build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();
    for (int i = 0; i < 3; i++) {
      cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
    }
    // Add an empty datanode for the balancer to move blocks onto.
    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK0}, null,
        new long[]{CAPACITY});
    cluster.triggerHeartbeats();
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Set<String> datanodes = new HashSet<String>();
    datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
    Balancer.Parameters p = new Balancer.Parameters(
        Balancer.Parameters.DEFAULT.policy,
        Balancer.Parameters.DEFAULT.threshold,
        datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
    final int r = Balancer.run(namenodes, p, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test a cluster with even distribution, then a new empty node is added to
 * the cluster. Test start a cluster with specified number of nodes, and fills
 * it to be 30% full (with a single file replicated identically to all
 * datanodes); It then adds one new empty node and starts balancing.
 * Runs against an HA namenode pair: the balancer must resolve exactly one
 * (logical) namenode URI.
 */
@Test(timeout = 60000)
public void testBalancerWithHANameNodes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  TestBalancer.initConf(conf);
  long newNodeCapacity = TestBalancer.CAPACITY;
  String newNodeRack = TestBalancer.RACK2;
  String[] racks = new String[]{TestBalancer.RACK0, TestBalancer.RACK1};
  long[] capacities = new long[]{TestBalancer.CAPACITY, TestBalancer.CAPACITY};
  assertEquals(capacities.length, racks.length);
  int numOfDatanodes = capacities.length;
  NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
  nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
  // Build the cluster from a copy so failover settings go into `conf` only.
  Configuration copiedConf = new Configuration(conf);
  cluster = new MiniDFSCluster.Builder(copiedConf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities)
      .build();
  HATestUtil.setFailoverConfigurations(cluster, conf);
  try {
    cluster.waitActive();
    cluster.transitionToActive(1);
    Thread.sleep(500);
    client = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        ClientProtocol.class).getProxy();
    // Fill the cluster to 30% with a file replicated to every datanode.
    long totalCapacity = TestBalancer.sum(capacities);
    long totalUsedSpace = totalCapacity * 3 / 10;
    TestBalancer.createFile(cluster, TestBalancer.filePath,
        totalUsedSpace / numOfDatanodes, (short) numOfDatanodes, 1);
    // Add one empty node, then balance.
    cluster.startDataNodes(conf, 1, true, null, new String[]{newNodeRack},
        new long[]{newNodeCapacity});
    totalCapacity += newNodeCapacity;
    TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    assertEquals(1, namenodes.size());
    assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
    TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
        Balancer.Parameters.DEFAULT);
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Create a cluster with even distribution, and a new empty node is added to
 * the cluster, then test rack locality for balancer policy.
 * The set of blocks resident on RACK0 must be unchanged by balancing onto
 * a new node in RACK1.
 */
@Test(timeout = 60000)
public void testBalancerWithRackLocality() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP1};
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
      .numDataNodes(capacities.length)
      .racks(racks)
      .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();
    // Fill the cluster to 30% with a file replicated to both datanodes.
    long totalCapacity = TestBalancer.sum(capacities);
    long totalUsedSpace = totalCapacity * 3 / 10;
    long length = totalUsedSpace / numOfDatanodes;
    TestBalancer.createFile(cluster, filePath, length, (short) numOfDatanodes, 0);
    LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
    // Snapshot which blocks live on RACK0 before balancing.
    Set<String> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
    long newCapacity = CAPACITY;
    String newRack = RACK1;
    String newNodeGroup = NODEGROUP2;
    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
        new long[]{newCapacity}, new String[]{newNodeGroup});
    totalCapacity += newCapacity;
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
    // Rack-locality policy: RACK0's block set must be unchanged.
    lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
    Set<String> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
    assertEquals(before, after);
  } finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Adding a storage to a fresh BlockInfo succeeds, and the storage becomes
 * the entry at replica index 0.
 */
@Test
public void testAddStorage() throws Exception {
  final DatanodeStorageInfo storageInfo =
      DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
  final BlockInfo block = new BlockInfo(3);
  Assert.assertTrue(block.addStorage(storageInfo));
  Assert.assertEquals(storageInfo, block.getStorageInfo(0));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercise moveBlockToHead on a storage's intrusive block list: building
 * the list, iterating it, moving each block (and random blocks) to the
 * head, and the no-op case of moving the head onto itself.
 */
@Test
public void testBlockListMoveToHead() throws Exception {
  LOG.info("BlockInfo moveToHead tests...");
  final int MAX_BLOCKS = 10;
  DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
  int headIndex;
  int curIndex;
  LOG.info("Building block list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    blockInfoList.add(new BlockInfo(blockList.get(i), 3));
    dd.addBlock(blockInfoList.get(i));
    // Each block has exactly one storage, so its index for dd is 0.
    assertEquals("Find datanode should be 0", 0, blockInfoList.get(i).findStorageInfo(dd));
  }
  LOG.info("Checking list length...");
  assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks());
  Iterator<BlockInfo> it = dd.getBlockIterator();
  int len = 0;
  while (it.hasNext()) {
    it.next();
    len++;
  }
  assertEquals("There should be MAX_BLOCK blockInfo's", MAX_BLOCKS, len);
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  LOG.info("Moving each block to the head of the list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    curIndex = blockInfoList.get(i).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(i), dd.getBlockListHeadForTesting());
  }
  LOG.info("Moving head to the head...");
  // Moving the current head onto itself must be a no-op.
  BlockInfo temp = dd.getBlockListHeadForTesting();
  curIndex = 0;
  headIndex = 0;
  dd.moveBlockToHead(temp, curIndex, headIndex);
  assertEquals("Moving head to the head of the list shopuld not change the list",
      temp, dd.getBlockListHeadForTesting());
  LOG.info("Checking elements of the list...");
  // After moving 0..MAX_BLOCKS-1 to the head in order, the list is reversed.
  temp = dd.getBlockListHeadForTesting();
  assertNotNull("Head should not be null", temp);
  int c = MAX_BLOCKS - 1;
  while (temp != null) {
    assertEquals("Expected element is not on the list", blockInfoList.get(c--), temp);
    temp = temp.getNext(0);
  }
  LOG.info("Moving random blocks to the head of the list...");
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  Random rand = new Random();
  for (int i = 0; i < MAX_BLOCKS; i++) {
    int j = rand.nextInt(MAX_BLOCKS);
    curIndex = blockInfoList.get(j).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(j), dd.getBlockListHeadForTesting());
  }
}
InternalCallVerifier ConditionMatcher
/**
 * Adding a block through a second storage on the same datanode replaces the
 * storage reference in place: addBlock reports false (no new replica entry)
 * while the block's storage at index 0 becomes the new storage.
 */
@Test
public void testReplaceStorage() throws Exception {
  final DatanodeStorageInfo storage1 =
      DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(
      storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  final BlockInfo[] blocks = new BlockInfo[NUM_BLOCKS];
  for (int idx = 0; idx < NUM_BLOCKS; idx++) {
    blocks[idx] = new BlockInfo(3);
    storage1.addBlock(blocks[idx]);
  }
  // Re-add the middle block via the second storage of the same datanode.
  final BlockInfo middle = blocks[NUM_BLOCKS / 2];
  final boolean wasAdded = storage2.addBlock(middle);
  Assert.assertThat(wasAdded, is(false));
  Assert.assertThat(middle.getStorageInfo(0), is(storage2));
}
InternalCallVerifier EqualityVerifier
/**
 * Repeatedly initialize block recovery on an under-construction block with
 * three live replicas whose heartbeat times are staggered, and verify which
 * datanode receives the lease-recovery command each round.
 * NOTE(review): which replica is chosen as primary appears to depend on
 * heartbeat recency and prior recovery attempts -- confirm against
 * BlockInfoUnderConstruction.initializeBlockRecovery.
 */
@Test public void testInitializeBlockRecovery() throws Exception {
DatanodeStorageInfo s1=DFSTestUtil.createDatanodeStorageInfo("10.10.1.1","s1");
DatanodeDescriptor dd1=s1.getDatanodeDescriptor();
DatanodeStorageInfo s2=DFSTestUtil.createDatanodeStorageInfo("10.10.1.2","s2");
DatanodeDescriptor dd2=s2.getDatanodeDescriptor();
DatanodeStorageInfo s3=DFSTestUtil.createDatanodeStorageInfo("10.10.1.3","s3");
DatanodeDescriptor dd3=s3.getDatanodeDescriptor();
// All three replicas' datanodes are considered live.
dd1.isAlive=dd2.isAlive=dd3.isAlive=true;
BlockInfoUnderConstruction blockInfo=new BlockInfoUnderConstruction(new Block(0,0,GenerationStamp.LAST_RESERVED_STAMP),3,BlockUCState.UNDER_CONSTRUCTION,new DatanodeStorageInfo[]{s1,s2,s3});
// Round 1: dd2 has the most recent heartbeat and gets the recovery command.
long currentTime=System.currentTimeMillis();
dd1.setLastUpdate(currentTime - 3 * 1000);
dd2.setLastUpdate(currentTime - 1 * 1000);
dd3.setLastUpdate(currentTime - 2 * 1000);
blockInfo.initializeBlockRecovery(1);
BlockInfoUnderConstruction[] blockInfoRecovery=dd2.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0],blockInfo);
// Round 2 (recovery id 2): dd1 receives the command.
currentTime=System.currentTimeMillis();
dd1.setLastUpdate(currentTime - 2 * 1000);
dd2.setLastUpdate(currentTime - 1 * 1000);
dd3.setLastUpdate(currentTime - 3 * 1000);
blockInfo.initializeBlockRecovery(2);
blockInfoRecovery=dd1.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0],blockInfo);
// Round 3: dd3 receives the command.
currentTime=System.currentTimeMillis();
dd1.setLastUpdate(currentTime - 2 * 1000);
dd2.setLastUpdate(currentTime - 1 * 1000);
dd3.setLastUpdate(currentTime - 3 * 1000);
currentTime=System.currentTimeMillis();
blockInfo.initializeBlockRecovery(3);
blockInfoRecovery=dd3.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0],blockInfo);
// Round 4: dd3 again, even with the freshest heartbeat.
currentTime=System.currentTimeMillis();
dd1.setLastUpdate(currentTime - 2 * 1000);
dd2.setLastUpdate(currentTime - 1 * 1000);
dd3.setLastUpdate(currentTime);
currentTime=System.currentTimeMillis();
blockInfo.initializeBlockRecovery(3);
blockInfoRecovery=dd3.getLeaseRecoveryCommand(1);
assertEquals(blockInfoRecovery[0],blockInfo);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify block-report accounting while the NN is in startup safe mode:
 * a registered datanode's first full report bumps its report count to 1,
 * a repeat report does not bump it again, and re-registration resets the
 * count so the next report counts as the first again.
 */
@Test public void testSafeModeIBR() throws Exception {
DatanodeDescriptor node=spy(nodes.get(0));
DatanodeStorageInfo ds=node.getStorageInfos()[0];
node.setDatanodeUuidForTesting(ds.getStorageID());
node.isAlive=true;
DatanodeRegistration nodeReg=new DatanodeRegistration(node,null,null,"");
// Force the namesystem to report startup safe mode for this test.
doReturn(true).when(fsn).isInStartupSafeMode();
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node);
assertEquals(node,bm.getDatanodeManager().getDatanode(node));
assertEquals(0,ds.getBlockReportCount());
reset(node);
// First (empty) full block report: count goes to 1.
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
reset(node);
// A second report must not increment the count again.
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
// Remove and re-register: registration info is refreshed and the
// report count resets to 0.
bm.getDatanodeManager().removeDatanode(node);
reset(node);
bm.getDatanodeManager().registerDatanode(nodeReg);
verify(node).updateRegInfo(nodeReg);
assertEquals(0,ds.getBlockReportCount());
reset(node);
// The next report after re-registration counts as the first again.
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that in startup safe mode a full block report from a datanode
 * that already holds blocks (numBlocks() stubbed to 1, as after an
 * incremental report) still records exactly one block report.
 */
@Test public void testSafeModeIBRAfterIncremental() throws Exception {
DatanodeDescriptor node=spy(nodes.get(0));
DatanodeStorageInfo ds=node.getStorageInfos()[0];
node.setDatanodeUuidForTesting(ds.getStorageID());
node.isAlive=true;
DatanodeRegistration nodeReg=new DatanodeRegistration(node,null,null,"");
// Force the namesystem to report startup safe mode for this test.
doReturn(true).when(fsn).isInStartupSafeMode();
bm.getDatanodeManager().registerDatanode(nodeReg);
bm.getDatanodeManager().addDatanode(node);
assertEquals(node,bm.getDatanodeManager().getDatanode(node));
assertEquals(0,ds.getBlockReportCount());
reset(node);
// Pretend the node already has one block (e.g. via an incremental report).
doReturn(1).when(node).numBlocks();
bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null));
assertEquals(1,ds.getBlockReportCount());
}
InternalCallVerifier NullVerifier
/**
 * Test that a source node for a highest-priority replication is chosen even if all available
 * source nodes have reached their replication limits.
 */
@Test
public void testHighestPriReplSrcChosenDespiteMaxReplLimit() throws Exception {
  // Soft limit 0 means every node is already "at" its limit; hard limit 1.
  bm.maxReplicationStreams = 0;
  bm.replicationStreamsHardLimit = 1;
  long blockId = 42;
  Block aBlock = new Block(blockId, 0, 0);
  List<DatanodeDescriptor> origNodes = getNodes(0, 1);
  // Only the first node holds the block.
  addBlockOnNodes(blockId, origNodes.subList(0, 1));
  List<DatanodeDescriptor> cntNodes = new LinkedList<DatanodeDescriptor>();
  List<DatanodeStorageInfo> liveNodes = new LinkedList<DatanodeStorageInfo>();
  assertNotNull("Chooses source node for a highest-priority replication"
      + " even if all available source nodes have reached their replication"
      + " limits below the hard limit.",
      bm.chooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
          UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
  assertNull("Does not choose a source node for a less-than-highest-priority"
      + " replication since all available source nodes have reached"
      + " their replication limits.",
      bm.chooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
          UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED));
  // Push the source node past the hard limit with a pending replication.
  DatanodeStorageInfo targets[] = {origNodes.get(1).getStorageInfos()[0]};
  origNodes.get(0).addBlockToBeReplicated(aBlock, targets);
  assertNull("Does not choose a source node for a highest-priority"
      + " replication when all available nodes exceed the hard limit.",
      bm.chooseSourceDatanode(aBlock, cntNodes, liveNodes, new NumberReplicas(),
          UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end read test for block access tokens: direct block reads succeed
 * with a valid READ token and fail once the token expires, targets the
 * wrong block id, or lacks READ access; then already-open streams are
 * re-read across datanode/namenode restarts to exercise token re-fetching.
 */
@Test
public void testRead() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // Shorten the token lifetime to 1s so expiry can be observed quickly.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    createFile(fs, fileToRead);
    // Three independent streams, kept open across the restarts below.
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1));
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2));
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3));
    // Open and immediately close a DFSClient; no assertion is made on it.
    DFSClient client = null;
    try {
      client = new DFSClient(new InetSocketAddress("localhost",
          cluster.getNameNodePort()), conf);
    } finally {
      if (client != null) client.close();
    }
    List<LocatedBlock> locatedBlocks =
        nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    LocatedBlock lblock = locatedBlocks.get(0);
    Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
    // A fresh token allows a direct block read.
    assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));
    tryRead(conf, lblock, true);
    // Wait for expiry; the expired token must be rejected.
    while (!SecurityTestUtil.isBlockTokenExpired(myToken)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken));
    tryRead(conf, lblock, false);
    // A re-issued READ token works again.
    lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
    tryRead(conf, lblock, true);
    // A token for a different block id must be rejected.
    ExtendedBlock wrongBlock = new ExtendedBlock(lblock.getBlock().getBlockPoolId(),
        lblock.getBlock().getBlockId() + 1);
    lblock.setBlockToken(sm.generateToken(wrongBlock,
        EnumSet.of(BlockTokenSecretManager.AccessMode.READ)));
    tryRead(conf, lblock, false);
    // A token without READ access mode must be rejected.
    lblock.setBlockToken(sm.generateToken(lblock.getBlock(),
        EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,
            BlockTokenSecretManager.AccessMode.COPY,
            BlockTokenSecretManager.AccessMode.REPLACE)));
    tryRead(conf, lblock, false);
    // Restore a long lifetime. The streams' cached tokens are expired, yet
    // re-reading succeeds -- the client re-fetches locations/tokens.
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1));
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(in2.seekToNewSource(0));
    assertTrue(checkFile1(in2));
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(checkFile2(in3));
    // Restart datanodes on the same ports; the freshly cached tokens are
    // still valid, so reads work even with the namenode down.
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in1.seek(0);
    assertTrue(checkFile1(in1));
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
      assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken()));
    }
    assertTrue(checkFile2(in3));
    // Reads with valid cached tokens still work after a NN restart cycle,
    // with the NN currently down.
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
    // Restart NN and then DNs (same ports), then stop the NN: reads now
    // fail because the streams cannot refresh what they need from the NN.
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    in1.seek(0);
    assertFalse(checkFile1(in1));
    assertFalse(checkFile2(in3));
    // With the NN back, clients re-fetch locations/tokens and reads succeed.
    cluster.restartNameNode(0);
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
    // Restart datanodes on new ports; reads still succeed via the live NN.
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    in1.seek(0);
    assertTrue(checkFile1(in1));
    in2.seekToNewSource(0);
    assertTrue(checkFile1(in2));
    assertTrue(checkFile2(in3));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * testing that APPEND operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // 1s token lifetime so the pipeline's token expires during the test.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();
    // Write one byte and close, then reopen for append.
    FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
    stm.write(rawData, 0, 1);
    stm.close();
    stm = fs.append(fileToAppend);
    int mid = rawData.length - 1;
    stm.write(rawData, 1, mid - 1);
    stm.hflush();
    // Busy-wait until the pipeline's block token expires.
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // Killing a datanode forces pipeline recovery with the expired token;
    // the remaining write and close must still succeed.
    cluster.stopDataNode(0);
    stm.write(rawData, mid, rawData.length - mid);
    stm.close();
    // Verify the full file content round-trips.
    FSDataInputStream in5 = fs.open(fileToAppend);
    assertTrue(checkFile1(in5));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * testing that WRITE operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test
public void testWrite() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // 1s token lifetime so the pipeline's token expires mid-write.
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToWrite = new Path(FILE_TO_WRITE);
    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes, BLOCK_SIZE);
    int mid = rawData.length - 1;
    stm.write(rawData, 0, mid);
    stm.hflush();
    // Busy-wait until the pipeline's block token expires.
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // Killing a datanode forces pipeline recovery with the expired token;
    // the remaining write and close must still succeed.
    cluster.stopDataNode(0);
    stm.write(rawData, mid, rawData.length - mid);
    stm.close();
    // Verify the full file content round-trips.
    FSDataInputStream in4 = fs.open(fileToWrite);
    assertTrue(checkFile1(in4));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Verifies that after one replica of a rack-spread file is corrupted, the
 * NameNode re-replicates a good copy and every surviving on-disk replica
 * (other than the corrupted node's) still matches the original content.
 */
@Test public void testCorruptBlockRereplicatedAcrossRacks() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
int fileLen=512;
final Path filePath=new Path("/testFile");
// Two racks with two nodes each so the placement policy spreads replicas.
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,fileLen,REPLICATION_FACTOR,1L);
// Remember the expected content for the replica comparison below.
final String fileContent=DFSTestUtil.readFile(fs,filePath);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Corrupt the replica on the first node holding the block, then restart
// that node so the corruption gets noticed.
int dnToCorrupt=DFSTestUtil.firstDnWithBlock(cluster,b);
assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt,b));
cluster.restartDataNode(dnToCorrupt);
// Wait for the NN to register one corrupt replica and then restore the
// replication target.
DFSTestUtil.waitCorruptReplicas(fs,ns,filePath,b,1);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Every readable replica that is not on the corrupted node must match
// the original file content.
for (int i=0; i < racks.length; i++) {
String blockContent=cluster.readBlockOnDataNode(i,b);
if (blockContent != null && i != dnToCorrupt) {
assertEquals("Corrupt replica",fileContent,blockContent);
}
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Decommissioning a node while the file is simultaneously over-replicated
 * (replication dropped from 5 to 2) must still converge to a state that
 * satisfies the rack placement policy.
 */
@Test public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=5;
final Path filePath=new Path("/testFile");
// Create empty include/exclude host files so refreshNodes() below can be
// used to drive decommissioning.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
// Only one node lives on /rack2; the other four are on /rack1.
String racks[]={"/rack1","/rack2","/rack1","/rack1","/rack1"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Make the file over-replicated (5 replicas, factor now 2) right before
// decommissioning starts.
REPLICATION_FACTOR=2;
fs.setReplication(filePath,REPLICATION_FACTOR);
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
// Decommission one /rack1 replica holder so a /rack2 replica must survive.
for ( String top : locs[0].getTopologyPaths()) {
if (!top.startsWith("/rack2")) {
// NOTE(review): assumes topology paths look like "/rack1/host:port";
// the substring strips "/rack1/" to recover the node name — confirm.
String name=top.substring("/rack1".length() + 1);
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
break;
}
}
// After decommission + excess pruning, two replicas on two racks remain.
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Decommissioning one replica holder must trigger re-replication that keeps
 * the block spread across racks.
 */
@Test public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration configuration = getConf();
  short replicationFactor = 2;
  final Path testFile = new Path("/testFile");
  // Empty include/exclude host files let refreshNodes() drive decommissioning.
  FileSystem localFs = FileSystem.getLocal(configuration);
  Path cwd = localFs.getWorkingDirectory();
  Path hostsDir = new Path(cwd, "build/test/data/temp/decommission");
  Path excludeFile = new Path(hostsDir, "exclude");
  Path includeFile = new Path(hostsDir, "include");
  assertTrue(localFs.mkdirs(hostsDir));
  DFSTestUtil.writeFile(localFs, excludeFile, "");
  DFSTestUtil.writeFile(localFs, includeFile, "");
  configuration.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  configuration.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  // Two nodes per rack so a replacement replica can stay rack-compliant.
  String rackAssignments[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(configuration).numDataNodes(rackAssignments.length).racks(rackAssignments).build();
  final FSNamesystem namesystem = dfsCluster.getNameNode().getNamesystem();
  try {
    final FileSystem dfs = dfsCluster.getFileSystem();
    DFSTestUtil.createFile(dfs, testFile, 1L, replicationFactor, 1L);
    ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(dfs, testFile);
    DFSTestUtil.waitForReplication(dfsCluster, firstBlock, 2, replicationFactor, 0);
    // Decommission the first replica holder by listing it in the exclude file.
    BlockLocation blockLocations[] = dfs.getFileBlockLocations(dfs.getFileStatus(testFile), 0, Long.MAX_VALUE);
    String decommissionTarget = blockLocations[0].getNames()[0];
    DFSTestUtil.writeFile(localFs, excludeFile, decommissionTarget);
    namesystem.getBlockManager().getDatanodeManager().refreshNodes(configuration);
    DFSTestUtil.waitForDecommission(dfs, decommissionTarget);
    // Replication must recover while still spanning two racks.
    DFSTestUtil.waitForReplication(dfsCluster, firstBlock, 2, replicationFactor, 0);
  }
  finally {
    dfsCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * When the only node on /rack2 is removed and later replaced, the block's
 * replicas must end up spanning two racks again.
 */
@Test public void testReduceReplFactorDueToRejoinRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  // Two nodes on /rack1 and a single node on /rack2.
  String racks[] = {"/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Fixed raw type: with a raw ArrayList, get(2) returned Object and the
    // assignment to DataNode below did not compile.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    // Kill the sole /rack2 node and drop it from the DatanodeManager.
    DataNode dataNode = datanodes.get(2);
    DatanodeID dnId = dataNode.getDatanodeId();
    cluster.stopDataNode(2);
    dm.removeDatanode(dnId);
    // Both remaining replicas now sit on /rack1 — the rack policy is
    // violated (one rack, one needed replacement).
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
    // Bring a fresh /rack2 node back; the violation should heal.
    String rack2[] = {"/rack2"};
    cluster.startDataNodes(conf, 1, true, null, rack2);
    cluster.waitActive();
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises one datanode's cached-blocks list in isolation: add appends,
 * addFirst prepends, remove and clear behave like a list, and operations on
 * the cached list never leak into the pending-cached/pending-uncached lists.
 */
@Test(timeout=60000) public void testSingleList(){
  DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));
  CachedBlock[] blocks = new CachedBlock[]{new CachedBlock(0L, (short) 1, true), new CachedBlock(1L, (short) 1, true), new CachedBlock(2L, (short) 1, true)};
  // All three cache-state lists start out empty.
  Assert.assertTrue("expected pending cached list to start off empty.", !dn.getPendingCached().iterator().hasNext());
  Assert.assertTrue("expected cached list to start off empty.", !dn.getCached().iterator().hasNext());
  Assert.assertTrue("expected pending uncached list to start off empty.", !dn.getPendingUncached().iterator().hasNext());
  // Adding to the cached list must not affect the other lists.
  Assert.assertTrue(dn.getCached().add(blocks[0]));
  Assert.assertTrue("expected pending cached list to still be empty.", !dn.getPendingCached().iterator().hasNext());
  Assert.assertEquals("failed to insert blocks[0]", blocks[0], dn.getCached().iterator().next());
  Assert.assertTrue("expected pending uncached list to still be empty.", !dn.getPendingUncached().iterator().hasNext());
  // add appends: order is [0, 1].
  Assert.assertTrue(dn.getCached().add(blocks[1]));
  Iterator<CachedBlock> iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[0], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertTrue(!iter.hasNext());
  // addFirst prepends: order is [2, 0, 1].
  Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
  iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[2], iter.next());
  Assert.assertEquals(blocks[0], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertTrue(!iter.hasNext());
  // Removing the middle element leaves [2, 1].
  Assert.assertTrue(dn.getCached().remove(blocks[0]));
  iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[2], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertTrue(!iter.hasNext());
  dn.getCached().clear();
  // Fixed: the message refers to the cached list, but the original
  // assertion inspected getPendingCached() instead of getCached().
  Assert.assertTrue("expected cached list to be empty after clear.", !dn.getCached().iterator().hasNext());
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if {@link FSNamesystem#computeInvalidateWork(int)} can schedule
 * invalidate work correctly: each scanned node contributes at most
 * blockInvalidateLimit blocks per round, and only up to the requested
 * number of nodes is scanned.
 */
@Test public void testCompInvalidate() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final int NUM_OF_DATANODES = 3;
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final int blockInvalidateLimit = bm.getDatanodeManager().blockInvalidateLimit;
    final DatanodeDescriptor[] nodes = bm.getDatanodeManager().getHeartbeatManager().getDatanodes();
    // Fixed assertEquals argument order: (expected, actual).
    assertEquals(NUM_OF_DATANODES, nodes.length);
    namesystem.writeLock();
    try {
      // Queue more than three full rounds of invalidation work per node.
      for (int i = 0; i < nodes.length; i++) {
        for (int j = 0; j < 3 * blockInvalidateLimit + 1; j++) {
          Block block = new Block(i * (blockInvalidateLimit + 1) + j, 0, GenerationStamp.LAST_RESERVED_STAMP);
          bm.addToInvalidates(block, nodes[i]);
        }
      }
      // Asking for more nodes than exist still caps at the real node count;
      // each node contributes at most blockInvalidateLimit blocks.
      assertEquals(blockInvalidateLimit * NUM_OF_DATANODES, bm.computeInvalidateWork(NUM_OF_DATANODES + 1));
      assertEquals(blockInvalidateLimit * NUM_OF_DATANODES, bm.computeInvalidateWork(NUM_OF_DATANODES));
      assertEquals(blockInvalidateLimit * (NUM_OF_DATANODES - 1), bm.computeInvalidateWork(NUM_OF_DATANODES - 1));
      int workCount = bm.computeInvalidateWork(1);
      if (workCount == 1) {
        // The single-node round drained an almost-empty queue; the next
        // two-node round picks up the leftover plus one full limit.
        assertEquals(blockInvalidateLimit + 1, bm.computeInvalidateWork(2));
      }
      else {
        // Fixed assertEquals argument order: blockInvalidateLimit is the
        // expected value, workCount the actual.
        assertEquals(blockInvalidateLimit, workCount);
        assertEquals(2, bm.computeInvalidateWork(2));
      }
    }
    finally {
      namesystem.writeUnlock();
    }
  }
  finally {
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Unit test for CorruptReplicasMap: size bookkeeping across add/remove (size
 * is per block, not per replica) and the paging behavior of
 * getCorruptReplicaBlockIds — n must be in [0, 100], results are ordered by
 * block id, and paging resumes strictly after the supplied start id.
 */
@Test public void testCorruptReplicaInfo() throws IOException, InterruptedException {
  CorruptReplicasMap crm = new CorruptReplicasMap();
  assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
  // Out-of-range page sizes are rejected with null.
  assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIds(-1, null));
  assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIds(101, null));
  long[] l = crm.getCorruptReplicaBlockIds(0, null);
  assertNotNull("n = 0 must return non-null", l);
  assertEquals("n = 0 must return an empty list", 0, l.length);
  int NUM_BLOCK_IDS = 140;
  // Fixed raw type: the raw LinkedList made the typed for-each below
  // uncompilable (Object cannot be assigned to Long).
  List<Long> blockIds = new LinkedList<Long>();
  for (int i = 0; i < NUM_BLOCK_IDS; i++) {
    blockIds.add((long) i);
  }
  DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
  addToCorruptReplicasMap(crm, getBlock(0), dn1);
  assertEquals("Number of corrupt blocks not returning correctly", 1, crm.size());
  addToCorruptReplicasMap(crm, getBlock(1), dn1);
  assertEquals("Number of corrupt blocks not returning correctly", 2, crm.size());
  // The same block reported corrupt by a second node does not grow the map.
  addToCorruptReplicasMap(crm, getBlock(1), dn2);
  assertEquals("Number of corrupt blocks not returning correctly", 2, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(1));
  assertEquals("Number of corrupt blocks not returning correctly", 1, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(0));
  assertEquals("Number of corrupt blocks not returning correctly", 0, crm.size());
  // Repopulate with 140 blocks to exercise paging.
  for (Long blockId : blockIds) {
    addToCorruptReplicasMap(crm, getBlock(blockId), dn1);
  }
  assertEquals("Number of corrupt blocks not returning correctly", NUM_BLOCK_IDS, crm.size());
  assertTrue("First five block ids not returned correctly ", Arrays.equals(new long[]{0, 1, 2, 3, 4}, crm.getCorruptReplicaBlockIds(5, null)));
  LOG.info(crm.getCorruptReplicaBlockIds(10, 7L));
  LOG.info(blockIds.subList(7, 18));
  // Paging resumes strictly after id 7, so ids 8..17 come back.
  assertTrue("10 blocks after 7 not returned correctly ", Arrays.equals(new long[]{8, 9, 10, 11, 12, 13, 14, 15, 16, 17}, crm.getCorruptReplicaBlockIds(10, 7L)));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that getInvalidateBlocks observes the max limit: the first call
 * returns at most MAX_LIMIT blocks and the second call returns the rest.
 */
@Test public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
  // Fixed raw type: the list holds Block instances.
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  // Fixed assertEquals argument order: (expected, actual).
  Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(MAX_LIMIT, bc.length);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(REMAINING_BLOCKS, bc.length);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks DatanodeDescriptor's block counter as blocks are added to and
 * removed from its first storage: duplicate adds and removals of unknown
 * blocks must leave the count unchanged.
 */
@Test public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dd.numBlocks());
  BlockInfo blk = new BlockInfo(new Block(1L), 1);
  BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
  DatanodeStorageInfo[] storages = dd.getStorageInfos();
  assertTrue(storages.length > 0);
  // (Removed an unused "storageID" local that the original declared.)
  assertTrue(storages[0].addBlock(blk));
  assertEquals(1, dd.numBlocks());
  // Removing a block that was never added is a no-op.
  assertFalse(dd.removeBlock(blk1));
  assertEquals(1, dd.numBlocks());
  // Re-adding an already-present block is a no-op.
  assertFalse(storages[0].addBlock(blk));
  assertEquals(1, dd.numBlocks());
  assertTrue(storages[0].addBlock(blk1));
  assertEquals(2, dd.numBlocks());
  assertTrue(dd.removeBlock(blk));
  assertEquals(1, dd.numBlocks());
  assertTrue(dd.removeBlock(blk1));
  assertEquals(0, dd.numBlocks());
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * This test sends a random sequence of node registrations and node removals
 * to the DatanodeManager (of nodes with different IDs and versions), and
 * checks that the DatanodeManager keeps a correct count of different software
 * versions at all times.
 */
@Test public void testNumVersionsReportedCorrect() throws IOException {
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class), fsn, new Configuration());
  // Draw a random seed, then re-seed so a failure can be reproduced from
  // the logged seed.
  Random rng = new Random();
  int seed = rng.nextInt();
  rng = new Random(seed);
  LOG.info("Using seed " + seed + " for testing");
  // Restored generics: the original raw/mangled declarations did not compile.
  HashMap<String, DatanodeRegistration> sIdToDnReg = new HashMap<String, DatanodeRegistration>();
  for (int i = 0; i < NUM_ITERATIONS; ++i) {
    if (rng.nextBoolean() && i % 3 == 0 && sIdToDnReg.size() != 0) {
      // Remove a randomly chosen registered node. Fixed: the original used
      // rng.nextInt() % size, which can be negative; the bounded form is
      // always a valid index.
      int randomIndex = rng.nextInt(sIdToDnReg.size());
      Iterator<Entry<String, DatanodeRegistration>> it = sIdToDnReg.entrySet().iterator();
      for (int j = 0; j < randomIndex - 1; ++j) {
        it.next();
      }
      DatanodeRegistration toRemove = it.next().getValue();
      LOG.info("Removing node " + toRemove.getDatanodeUuid() + " ip "+ toRemove.getXferAddr()+ " version : "+ toRemove.getSoftwareVersion());
      dm.removeDatanode(toRemove);
      it.remove();
    }
    else {
      // Register either a brand-new mock node or re-register an existing
      // one (sometimes with a changed IP) under a random software version.
      String storageID = "someStorageID" + rng.nextInt(5000);
      DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
      Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
      if (sIdToDnReg.containsKey(storageID)) {
        dr = sIdToDnReg.get(storageID);
        if (rng.nextBoolean()) {
          dr.setIpAddr(dr.getIpAddr() + "newIP");
        }
      }
      else {
        String ip = "someIP" + storageID;
        Mockito.when(dr.getIpAddr()).thenReturn(ip);
        Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
        Mockito.when(dr.getXferPort()).thenReturn(9000);
      }
      Mockito.when(dr.getSoftwareVersion()).thenReturn("version" + rng.nextInt(5));
      LOG.info("Registering node storageID: " + dr.getDatanodeUuid() + ", version: "+ dr.getSoftwareVersion()+ ", IP address: "+ dr.getXferAddr());
      dm.registerDatanode(dr);
      sIdToDnReg.put(storageID, dr);
    }
    // Cross-check: decrement the DM's version counts by our own registry;
    // everything must cancel out exactly.
    Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
    for (Entry<String, DatanodeRegistration> node : sIdToDnReg.entrySet()) {
      String ver = node.getValue().getSoftwareVersion();
      if (!mapToCheck.containsKey(ver)) {
        throw new AssertionError("The correct number of datanodes of a " + "version was not found on iteration " + i);
      }
      mapToCheck.put(ver, mapToCheck.get(ver) - 1);
      if (mapToCheck.get(ver) == 0) {
        mapToCheck.remove(ver);
      }
    }
    for (Entry<String, Integer> entry : mapToCheck.entrySet()) {
      LOG.info("Still in map: " + entry.getKey() + " has "+ entry.getValue());
    }
    assertEquals("The map of version counts returned by DatanodeManager was" + " not what it was expected to be on iteration " + i, 0, mapToCheck.size());
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Every registered datanode is reported as contained; null and a descriptor
 * that was never added are not.
 */
@Test public void testContains() throws Exception {
  DatanodeDescriptor outsider = DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  for (DatanodeDescriptor registered : dataNodes) {
    assertTrue(map.contains(registered));
  }
  assertFalse(map.contains(null));
  assertFalse(map.contains(outsider));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Removes datanodes one by one and checks host lookups track the removals;
 * removing null or an already-removed node must return false.
 */
@Test public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap = DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  // Removing a node that was never added is a no-op.
  assertFalse(map.remove(nodeNotInMap));
  assertTrue(map.remove(dataNodes[0]));
  // NOTE(review): the trailing dot in "1.1.1.1." looks like a typo for
  // "1.1.1.1", though the lookup is null either way after the removal.
  assertTrue(map.getDatanodeByHost("1.1.1.1.") == null);
  assertTrue(map.getDatanodeByHost("2.2.2.2") == dataNodes[1]);
  // Two nodes share host 3.3.3.3; either may be returned.
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  // Fixed assertEquals argument order: (expected, actual).
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  assertEquals(dataNodes[3], map.getDatanodeByHost("3.3.3.3"));
  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  assertNull(map.getDatanodeByHost("3.3.3.3"));
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  // A second removal of the same node must fail.
  assertFalse(map.remove(dataNodes[1]));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Host lookups resolve known hosts to their descriptors (either of the two
 * 3.3.3.3 nodes is acceptable) and unknown hosts to null.
 */
@Test public void testGetDatanodeByHost() throws Exception {
  // Fixed assertEquals argument order: (expected, actual).
  assertEquals(dataNodes[0], map.getDatanodeByHost("1.1.1.1"));
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises DatanodeManager's include/exclude host handling via a mocked
 * HostFileManager: include-listed nodes that never registered are reported
 * as DEAD, and excluding a node drops it from the report entirely.
 */
@Test @SuppressWarnings("unchecked") public void testIncludeExcludeLists() throws IOException {
  BlockManager bm = mock(BlockManager.class);
  FSNamesystem fsn = mock(FSNamesystem.class);
  Configuration conf = new Configuration();
  HostFileManager hm = mock(HostFileManager.class);
  HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
  HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
  includedNodes.add(entry("127.0.0.1:12345"));
  includedNodes.add(entry("localhost:12345"));
  // Duplicate of the first entry; HostSet de-duplicates, hence size 2 below.
  includedNodes.add(entry("127.0.0.1:12345"));
  includedNodes.add(entry("127.0.0.2"));
  excludedNodes.add(entry("127.0.0.1:12346"));
  excludedNodes.add(entry("127.0.30.1:12346"));
  Assert.assertEquals(2, includedNodes.size());
  Assert.assertEquals(2, excludedNodes.size());
  doReturn(includedNodes).when(hm).getIncludes();
  doReturn(excludedNodes).when(hm).getExcludes();
  DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
  Whitebox.setInternalState(dm, "hostFileManager", hm);
  // Fixed raw type: the internal map is keyed by datanode UUID; the
  // unchecked cast is what the method-level annotation suppresses.
  Map<String, DatanodeDescriptor> dnMap = (Map<String, DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");
  // Nothing registered yet: both include entries are reported, both DEAD.
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL).size());
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2", "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0.3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
  // A lastUpdate of 0 makes the node stale, i.e. dead.
  spam.setLastUpdate(0);
  includedNodes.add(entry("127.0.0.3:12345"));
  dnMap.put("uuid-spam", spam);
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  dnMap.remove("uuid-spam");
  // Still include-listed, so still reported dead even after de-registration.
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  excludedNodes.add(entry("127.0.0.3"));
  // Excluding the host removes it from the report entirely.
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
}
InternalCallVerifier EqualityVerifier
/**
 * HostSet de-duplicates equivalent entries but keeps distinct ports,
 * port-less entries, and distinct addresses separate.
 */
@Test public void testDeduplication(){
  HostFileManager.HostSet hostSet = new HostFileManager.HostSet();
  // These two entries collapse into one (the set sizes below show they
  // are treated as the same host:port).
  hostSet.add(entry("127.0.0.1:12345"));
  hostSet.add(entry("localhost:12345"));
  Assert.assertEquals(1, hostSet.size());
  // Re-adding an identical entry is a no-op.
  hostSet.add(entry("127.0.0.1:12345"));
  Assert.assertEquals(1, hostSet.size());
  // A different port is a distinct entry.
  hostSet.add(entry("127.0.0.1:12346"));
  Assert.assertEquals(2, hostSet.size());
  // A port-less entry does not merge with port-qualified ones.
  hostSet.add(entry("127.0.0.1"));
  Assert.assertEquals(3, hostSet.size());
  // A different address is distinct.
  hostSet.add(entry("127.0.0.10"));
  Assert.assertEquals(4, hostSet.size());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies HostSet#match and HostSet#matchedBy for every probe entry as
 * entries of increasing generality are added to the set. Decomposed from 33
 * repetitive assertions into a table of (entry, match, matchedBy) triples.
 */
@Test public void testRelation(){
  HostFileManager.HostSet s = new HostFileManager.HostSet();
  s.add(entry("127.0.0.1:123"));
  // Only the exact ip:port matches; the bare IP is matchedBy but not match.
  checkRelation(s, "127.0.0.1:123", true, true);
  checkRelation(s, "127.0.0.1:12", false, false);
  checkRelation(s, "127.0.0.1", false, true);
  checkRelation(s, "127.0.0.2", false, false);
  checkRelation(s, "127.0.0.2:123", false, false);
  s.add(entry("127.0.0.1"));
  // A bare IP in the set now matches any port on that IP.
  checkRelation(s, "127.0.0.1:123", true, true);
  checkRelation(s, "127.0.0.1:12", true, false);
  checkRelation(s, "127.0.0.1", true, true);
  checkRelation(s, "127.0.0.2", false, false);
  checkRelation(s, "127.0.0.2:123", false, false);
  s.add(entry("127.0.0.2:123"));
  // The new ip:port entry matches exactly and is matchedBy its bare IP.
  checkRelation(s, "127.0.0.1:123", true, true);
  checkRelation(s, "127.0.0.1:12", true, false);
  checkRelation(s, "127.0.0.1", true, true);
  checkRelation(s, "127.0.0.2", false, true);
  checkRelation(s, "127.0.0.2:123", true, true);
}
/**
 * Asserts both directions of the host-set relation for a single probe entry.
 */
private void checkRelation(HostFileManager.HostSet s, String e, boolean expectMatch, boolean expectMatchedBy) {
  Assert.assertEquals("match(" + e + ")", expectMatch, s.match(entry(e)));
  Assert.assertEquals("matchedBy(" + e + ")", expectMatchedBy, s.matchedBy(entry(e)));
}
InternalCallVerifier BooleanVerifier
/**
 * Tests excess-replica accounting as datanodes die, restart, and re-report:
 * replicas that reappear after re-replication must be counted as excess,
 * not live.
 */
@Test public void testNodeCount() throws Exception {
final Configuration conf=new HdfsConfiguration();
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
try {
final FSNamesystem namesystem=cluster.getNamesystem();
final BlockManager bm=namesystem.getBlockManager();
final HeartbeatManager hm=bm.getDatanodeManager().getHeartbeatManager();
final FileSystem fs=cluster.getFileSystem();
// Create a fully replicated single-block file.
final Path FILE_PATH=new Path("/testfile");
DFSTestUtil.createFile(fs,FILE_PATH,1L,REPLICATION_FACTOR,1L);
DFSTestUtil.waitReplication(fs,FILE_PATH,REPLICATION_FACTOR);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,FILE_PATH);
final DatanodeDescriptor[] datanodes=hm.getDatanodes();
// Add two more datanodes as re-replication targets.
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
// Kill one original replica holder, make the NN notice, and wait until
// re-replication restores the replication factor elsewhere.
DatanodeDescriptor datanode=datanodes[0];
DataNodeProperties dnprop=cluster.stopDataNode(datanode.getXferAddr());
BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),datanode.getXferAddr());
DFSTestUtil.waitReplication(fs,FILE_PATH,REPLICATION_FACTOR);
// Bring the node back: its replica is now surplus and must eventually
// show up as excess.
cluster.restartDataNode(dnprop);
cluster.waitActive();
initializeTimeout(TIMEOUT);
while (countNodes(block.getLocalBlock(),namesystem).excessReplicas() == 0) {
checkTimeout("excess replicas not detected");
}
// Find a datanode whose replica is NOT in the excess map.
DatanodeDescriptor nonExcessDN=null;
for ( DatanodeStorageInfo storage : bm.blocksMap.getStorages(block.getLocalBlock())) {
final DatanodeDescriptor dn=storage.getDatanodeDescriptor();
Collection blocks=bm.excessReplicateMap.get(dn.getDatanodeUuid());
if (blocks == null || !blocks.contains(block.getLocalBlock())) {
nonExcessDN=dn;
break;
}
}
assertTrue(nonExcessDN != null);
// Kill that non-excess holder too; the live replica count must recover
// to the replication factor before it is restarted.
dnprop=cluster.stopDataNode(nonExcessDN.getXferAddr());
BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),nonExcessDN.getXferAddr());
initializeTimeout(TIMEOUT);
while (countNodes(block.getLocalBlock(),namesystem).liveReplicas() != REPLICATION_FACTOR) {
checkTimeout("live replica count not correct",1000);
}
// After restarting it, there should be exactly two excess replicas.
cluster.restartDataNode(dnprop);
cluster.waitActive();
initializeTimeout(TIMEOUT);
while (countNodes(block.getLocalBlock(),namesystem).excessReplicas() != 2) {
checkTimeout("excess replica count not equal to 2");
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test processOverReplicatedBlock can handle corrupt replicas fine.
 * It makes sure that corrupt replicas are not treated as valid ones, which
 * would let the NN delete valid replicas while keeping corrupt ones.
 */
@Test public void testProcesOverReplicateBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);
    // Corrupt the replica on datanode 0, then bounce that node.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // Delete the previous block-verification log; presumably so the
    // restarted node re-scans and re-detects the corrupt replica — the
    // delete can fail transiently, hence the retry loop.
    File scanLog = new File(MiniDFSCluster.getFinalizedDir(cluster.getInstanceStorageDir(0, 0), cluster.getNamesystem().getBlockPoolId()).getParent().toString() + "/../dncp_block_verification.log.prev");
    for (int i = 0; !scanLog.delete(); i++) {
      assertTrue("Could not delete log file in one minute", i < 60);
      try {
        Thread.sleep(1000);
      }
      catch (InterruptedException ignored) {
      }
    }
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    String blockPoolId = cluster.getNamesystem().getBlockPoolId();
    final DatanodeID corruptDataNode = DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), blockPoolId);
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
    // Fixed lock idiom: acquire the write lock BEFORE entering the try, so
    // the finally cannot call writeUnlock() for a lock that was never taken.
    namesystem.writeLock();
    try {
      synchronized (hm) {
        // Report every node other than the corrupt one as completely full
        // via its heartbeat.
        String corruptMachineName = corruptDataNode.getXferAddr();
        for (DatanodeDescriptor datanode : hm.getDatanodes()) {
          if (!corruptMachineName.equals(datanode.getXferAddr())) {
            datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
            datanode.updateHeartbeat(BlockManagerTestUtil.getStorageReportsForDatanode(datanode), 0L, 0L, 0, 0);
          }
        }
        // Shrink replication to 1: the surviving live replica must be a
        // valid one, never the corrupt replica.
        NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short) 1);
        assertEquals(1, bm.countNodes(block.getLocalBlock()).liveReplicas());
      }
    }
    finally {
      namesystem.writeUnlock();
    }
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * An over-replicated block should get invalidated when the replication
 * factor is decreased on a partial (still-open) block.
 */
@Test public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration configuration = new HdfsConfiguration();
  MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(configuration).numDataNodes(3).build();
  try {
    final FSNamesystem namesystem = dfsCluster.getNamesystem();
    final BlockManager blockManager = namesystem.getBlockManager();
    FileSystem fileSystem = dfsCluster.getFileSystem();
    Path file = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    // Open the file with replication 2 and sync some data so the first
    // block exists but is still under construction.
    FSDataOutputStream out = fileSystem.create(file, (short) 2);
    out.writeBytes("HDFS-3119: " + file);
    out.hsync();
    // Drop the replication factor before closing...
    fileSystem.setReplication(file, (short) 1);
    out.close();
    // ...then confirm only a single live replica remains for the block.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fileSystem, file);
    assertEquals("Expected only one live replica for the block", 1, blockManager.countNodes(block.getLocalBlock()).liveReplicas());
  }
  finally {
    dfsCluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The test verifies that replica for deletion is chosen on a node,
 * with the oldest heartbeat, when this heartbeat is larger than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats.
 */
@Test public void testChooseReplicaToDelete() throws Exception {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,SMALL_BLOCK_SIZE);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Start a 4th datanode with a very long heartbeat interval so its
// heartbeat becomes (and stays) the oldest.
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,300);
cluster.startDataNodes(conf,1,true,null,null,null);
DataNode lastDN=cluster.getDataNodes().get(3);
DatanodeRegistration dnReg=DataNodeTestUtils.getDNRegistrationForBP(lastDN,namesystem.getBlockPoolId());
String lastDNid=dnReg.getDatanodeUuid();
final Path fileName=new Path("/foo2");
DFSTestUtil.createFile(fs,fileName,SMALL_FILE_LENGTH,(short)4,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)4);
// Spin until the last node's heartbeat is older than the tolerable
// heartbeat interval.
DatanodeDescriptor nodeInfo=null;
long lastHeartbeat=0;
long waitTime=DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 * (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
do {
nodeInfo=namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg);
lastHeartbeat=nodeInfo.getLastUpdate();
}
while (now() - lastHeartbeat < waitTime);
// Reduce replication: every excess replica should be scheduled for
// deletion on the stale-heartbeat node.
fs.setReplication(fileName,(short)3);
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(fileName),0,Long.MAX_VALUE);
// One scheduled deletion per block of the file is expected.
namesystem.readLock();
Collection dnBlocks=namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
assertEquals("Replicas on node " + lastDNid + " should have been deleted",SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE,dnBlocks.size());
namesystem.readUnlock();
// Nothing is actually deleted since the last DN never heartbeats.
for ( BlockLocation location : locs) assertEquals("Block should still have 4 replicas",4,location.getNames().length);
}
finally {
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the pending datanode-message queues: reported blocks queue per
 * block id, takeBlockQueue drains every generation-stamp variant of the
 * requested block, and unknown or already-drained blocks yield null.
 * NOTE(review): relies on fixture fields (msgs, block1Gs1, block1Gs2,
 * block2Gs1, block1Gs2DifferentInstance) declared elsewhere in this class.
 */
@Test public void testQueues(){
DatanodeDescriptor fakeDN=DFSTestUtil.getLocalDatanodeDescriptor();
DatanodeStorage storage=new DatanodeStorage("STORAGE_ID");
DatanodeStorageInfo storageInfo=new DatanodeStorageInfo(fakeDN,storage);
// Queue two reports for the same block id with different generation stamps.
msgs.enqueueReportedBlock(storageInfo,block1Gs1,ReplicaState.FINALIZED);
msgs.enqueueReportedBlock(storageInfo,block1Gs2,ReplicaState.FINALIZED);
assertEquals(2,msgs.count());
// A different block id takes nothing and leaves the queue intact.
assertNull(msgs.takeBlockQueue(block2Gs1));
assertEquals(2,msgs.count());
// Taking with an equal block (different object instance) drains both
// queued reports in insertion order.
Queue q=msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals("ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," + "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",Joiner.on(",").join(q));
assertEquals(0,msgs.count());
// Once drained, a second take returns null.
assertNull(msgs.takeBlockQueue(block1Gs1));
assertEquals(0,msgs.count());
}
InternalCallVerifier EqualityVerifier
/**
 * Deleting a file right after a NameNode restart must queue one pending
 * deletion per replica, and the queue must drain to zero shortly after.
 */
@Test public void testPendingDeletion() throws Exception {
  final Path fileToDelete = new Path("/foo");
  DFSTestUtil.createFile(dfs, fileToDelete, BLOCKSIZE, REPLICATION, 0);
  // Restart the NameNode, then delete the file immediately afterwards.
  cluster.restartNameNode(true);
  dfs.delete(fileToDelete, true);
  Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
  // Right after the delete, one replica per datanode is pending deletion.
  Assert.assertEquals(REPLICATION, cluster.getNamesystem().getPendingDeletionBlocks());
  // Give the deletion machinery time to drain the queue.
  Thread.sleep(6000);
  Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}
InternalCallVerifier EqualityVerifier
/**
 * Test whether we can delay the deletion of unknown blocks in DataNode's
 * first several block reports.
 */
@Test public void testPendingDeleteUnknownBlocks() throws Exception {
final int fileNum=5;
final Path[] files=new Path[fileNum];
final DataNodeProperties[] dnprops=new DataNodeProperties[REPLICATION];
// Create five files, then stop every datanode so deletions issued next
// never reach them.
for (int i=0; i < fileNum; i++) {
files[i]=new Path("/file" + i);
DFSTestUtil.createFile(dfs,files[i],BLOCKSIZE,REPLICATION,i);
}
waitForReplication();
for (int i=REPLICATION - 1; i >= 0; i--) {
dnprops[i]=cluster.stopDataNode(i);
}
Thread.sleep(2000);
// Delete two files while the datanodes are down; the DNs still hold
// those blocks, which will look unknown to the restarted NN.
for (int i=0; i < 2; i++) {
dfs.delete(files[i],true);
}
// Restart the NameNode without formatting, then inject a spy that
// reports a 1-second invalidation delay.
cluster.restartNameNode(false);
InvalidateBlocks invalidateBlocks=(InvalidateBlocks)Whitebox.getInternalState(cluster.getNamesystem().getBlockManager(),"invalidateBlocks");
InvalidateBlocks mockIb=Mockito.spy(invalidateBlocks);
Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(),"invalidateBlocks",mockIb);
Assert.assertEquals(0L,cluster.getNamesystem().getPendingDeletionBlocks());
// Bring the datanodes back and force full block reports, so the deleted
// files' blocks are reported to the NN again.
for (int i=0; i < REPLICATION; i++) {
cluster.restartDataNode(dnprops[i],true);
}
cluster.waitActive();
for (int i=0; i < REPLICATION; i++) {
DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
}
Thread.sleep(2000);
// 3 surviving files' blocks are live; 4 deletions are queued but delayed
// — presumably the 2 deleted files' replicas; confirm against the
// class fixture's REPLICATION value.
Assert.assertEquals(3,cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(4,cluster.getNamesystem().getPendingDeletionBlocks());
// After another NN restart and a wait past the delay, the queue drains.
cluster.restartNameNode(true);
Thread.sleep(6000);
Assert.assertEquals(3,cluster.getNamesystem().getBlocksTotal());
Assert.assertEquals(0,cluster.getNamesystem().getPendingDeletionBlocks());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
 * pending replications. Also make sure the blockReceivedAndDeleted call is
 * idempotent to the pending replications.
 */
@Test public void testBlockReceived() throws Exception {
final Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
cluster.waitActive();
DistributedFileSystem hdfs=cluster.getFileSystem();
FSNamesystem fsn=cluster.getNamesystem();
BlockManager blkManager=fsn.getBlockManager();
final String file="/tmp.txt";
final Path filePath=new Path(file);
short replFactor=1;
DFSTestUtil.createFile(hdfs,filePath,1024L,replFactor,0);
// Freeze heartbeats so pending-replication state changes only through the
// explicit blockReceivedAndDeleted calls below.
ArrayList datanodes=cluster.getDataNodes();
for (int i=0; i < DATANODE_COUNT; i++) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i),true);
}
// Raise replication so the single block needs DATANODE_COUNT - 1 new replicas.
hdfs.setReplication(filePath,(short)DATANODE_COUNT);
BlockManagerTestUtil.computeAllPendingWork(blkManager);
assertEquals(1,blkManager.pendingReplications.size());
INodeFile fileNode=fsn.getFSDirectory().getINode4Write(file).asFile();
Block[] blocks=fileNode.getBlocks();
assertEquals(DATANODE_COUNT - 1,blkManager.pendingReplications.getNumReplicas(blocks[0]));
LocatedBlock locatedBlock=hdfs.getClient().getLocatedBlocks(file,0).get(0);
DatanodeInfo existingDn=(locatedBlock.getLocations())[0];
int reportDnNum=0;
String poolId=cluster.getNamesystem().getBlockPoolId();
// Two datanodes (other than the original replica holder) report the block
// as received; each report decrements the pending count by one.
for (int i=0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
DatanodeRegistration dnR=datanodes.get(i).getDNRegistrationForBP(poolId);
StorageReceivedDeletedBlocks[] report={new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",new ReceivedDeletedBlockInfo[]{new ReceivedDeletedBlockInfo(blocks[0],BlockStatus.RECEIVED_BLOCK,"")})};
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR,poolId,report);
reportDnNum++;
}
}
assertEquals(DATANODE_COUNT - 3,blkManager.pendingReplications.getNumReplicas(blocks[0]));
// BUG FIX: reset the counter so the same two datanodes actually report
// again. Previously reportDnNum was still 2, the loop guard was false, the
// body never executed, and the idempotency assertion below was vacuous.
reportDnNum=0;
for (int i=0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
DatanodeRegistration dnR=datanodes.get(i).getDNRegistrationForBP(poolId);
StorageReceivedDeletedBlocks[] report={new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",new ReceivedDeletedBlockInfo[]{new ReceivedDeletedBlockInfo(blocks[0],BlockStatus.RECEIVED_BLOCK,"")})};
cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR,poolId,report);
reportDnNum++;
}
}
// Duplicate reports must not change the pending count (idempotency).
assertEquals(DATANODE_COUNT - 3,blkManager.pendingReplications.getNumReplicas(blocks[0]));
// Re-enable heartbeats and let normal replication drain the queue.
for (int i=0; i < DATANODE_COUNT; i++) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i),false);
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
}
Thread.sleep(5000);
assertEquals(0,blkManager.pendingReplications.size());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * @throws Exception
 */
@Test public void testPendingAndInvalidate() throws Exception {
final Configuration CONF=new HdfsConfiguration();
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,DFS_REPLICATION_INTERVAL);
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,DFS_REPLICATION_INTERVAL);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
cluster.waitActive();
FSNamesystem namesystem=cluster.getNamesystem();
BlockManager bm=namesystem.getBlockManager();
DistributedFileSystem fs=cluster.getFileSystem();
try {
// Create a 3-replica file, then freeze heartbeats so replication work is
// only scheduled when we explicitly compute it.
Path filePath=new Path("/tmp.txt");
DFSTestUtil.createFile(fs,filePath,1024,(short)3,0L);
for ( DataNode dn : cluster.getDataNodes()) {
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,true);
}
LocatedBlock block=NameNodeAdapter.getBlockLocations(cluster.getNameNode(),filePath.toString(),0,1).get(0);
// Corrupt two of the three replicas under the namesystem write lock so the
// block becomes under-replicated and gets pending-replication records.
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(),block.getLocations()[0],"STORAGE_ID","TEST");
bm.findAndMarkBlockAsCorrupt(block.getBlock(),block.getLocations()[1],"STORAGE_ID","TEST");
}
finally {
cluster.getNamesystem().writeUnlock();
}
BlockManagerTestUtil.computeAllPendingWork(bm);
BlockManagerTestUtil.updateState(bm);
// FIX: JUnit assertEquals takes (expected, actual); the original had the
// arguments reversed, producing misleading failure messages.
assertEquals(1L,bm.getPendingReplicationBlocksCount());
assertEquals(2,bm.pendingReplications.getNumReplicas(block.getBlock().getLocalBlock()));
// Deleting the file should remove its pending-replication records.
fs.delete(filePath,true);
int retries=10;
long pendingNum=bm.getPendingReplicationBlocksCount();
while (pendingNum != 0 && retries-- > 0) {
Thread.sleep(1000);
BlockManagerTestUtil.updateState(bm);
pendingNum=bm.getPendingReplicationBlocksCount();
}
assertEquals(0L,pendingNum);
}
finally {
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Unit test of PendingReplicationBlocks bookkeeping: increments/decrements
 * per block, size accounting, and timeout harvesting via getTimedOutBlocks.
 */
@Test public void testPendingReplication(){
PendingReplicationBlocks pendingReplications;
pendingReplications=new PendingReplicationBlocks(TIMEOUT * 1000);
pendingReplications.start();
DatanodeStorageInfo[] storages=DFSTestUtil.createDatanodeStorageInfos(10);
// Block i is registered with i pending target replicas (block 0 has none).
for (int i=0; i < storages.length; i++) {
Block block=new Block(i,i,0);
DatanodeStorageInfo[] targets=new DatanodeStorageInfo[i];
System.arraycopy(storages,0,targets,0,i);
pendingReplications.increment(block,DatanodeStorageInfo.toDatanodeDescriptors(targets));
}
assertEquals("Size of pendingReplications ",10,pendingReplications.size());
// Decrement block 8 once: 8 -> 7 pending replicas.
Block blk=new Block(8,8,0);
pendingReplications.decrement(blk,storages[7].getDatanodeDescriptor());
assertEquals("pendingReplications.getNumReplicas ",7,pendingReplications.getNumReplicas(blk));
// Drain the remaining 7; the entry for block 8 disappears entirely.
for (int i=0; i < 7; i++) {
pendingReplications.decrement(blk,storages[i].getDatanodeDescriptor());
}
assertTrue(pendingReplications.size() == 9);
// Re-add block 8 with 8 fresh targets, restoring the pre-decrement state.
pendingReplications.increment(blk,DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(8)));
assertTrue(pendingReplications.size() == 10);
for (int i=0; i < 10; i++) {
Block block=new Block(i,i,0);
int numReplicas=pendingReplications.getNumReplicas(block);
assertTrue(numReplicas == i);
}
// Nothing has timed out yet.
assertTrue(pendingReplications.getTimedOutBlocks() == null);
try {
Thread.sleep(1000);
}
catch ( Exception e) {
}
// Add five more blocks (ids 10..14) so 15 entries can time out.
for (int i=10; i < 15; i++) {
Block block=new Block(i,i,0);
pendingReplications.increment(block,DatanodeStorageInfo.toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(i)));
}
assertTrue(pendingReplications.size() == 15);
// Poll until the timeout monitor has expired every entry.
int loop=0;
while (pendingReplications.size() > 0) {
try {
Thread.sleep(1000);
}
catch ( Exception e) {
}
loop++;
}
System.out.println("Had to wait for " + loop + " seconds for the lot to timeout");
assertEquals("Size of pendingReplications ",0,pendingReplications.size());
// All 15 blocks must be reported as timed out, with ids in [0, 15).
Block[] timedOut=pendingReplications.getTimedOutBlocks();
assertTrue(timedOut != null && timedOut.length == 15);
for (int i=0; i < timedOut.length; i++) {
assertTrue(timedOut[i].getBlockId() < 15);
}
pendingReplications.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test when a block's replica is removed from RBW folder in one of the
 * datanode, namenode should ask to invalidate that corrupted block and
 * schedule replication for one more replica for that under replicated block.
 */
@Test(timeout=600000) public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException {
// RBW file deletion below relies on POSIX semantics; skip on Windows.
assumeTrue(!Path.WINDOWS);
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,2);
// Short report/scan/heartbeat intervals so corruption is noticed quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,300);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FSDataOutputStream out=null;
try {
final FSNamesystem namesystem=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
Path testPath=new Path("/tmp/TestRBWBlockInvalidation","foo1");
// Keep the stream open (hsync, no close) so the replicas stay in RBW state.
out=fs.create(testPath,(short)2);
out.writeBytes("HDFS-3157: " + testPath);
out.hsync();
// A third datanode gives the NN somewhere to re-replicate to.
cluster.startDataNodes(conf,1,true,null,null,null);
String bpid=namesystem.getBlockPoolId();
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,testPath);
Block block=blk.getLocalBlock();
DataNode dn=cluster.getDataNodes().get(0);
// Delete the RBW block and meta files out from under datanode 0.
File blockFile=DataNodeTestUtils.getBlockFile(dn,bpid,block);
File metaFile=DataNodeTestUtils.getMetaFile(dn,bpid,block);
assertTrue("Could not delete the block file from the RBW folder",blockFile.delete());
assertTrue("Could not delete the block meta file from the RBW folder",metaFile.delete());
out.close();
// Poll until the NN notices the lost replica (live count drops below 2).
int liveReplicas=0;
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) < 2) {
LOG.info("Live Replicas after corruption: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be less than 2 replicas in the " + "liveReplicasMap",1,liveReplicas);
// Poll until re-replication restores the second live replica.
while (true) {
if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) > 1) {
LOG.info("Live Replicas after Rereplication: " + liveReplicas);
break;
}
Thread.sleep(100);
}
assertEquals("There should be two live replicas",2,liveReplicas);
// Finally the corrupt replica must be invalidated and drop to zero.
while (true) {
Thread.sleep(100);
if (countReplicas(namesystem,blk).corruptReplicas() == 0) {
LOG.info("Corrupt Replicas becomes 0");
break;
}
}
}
finally {
if (out != null) {
out.close();
}
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-4799, a case where, upon restart, if there
 * were RWR replicas with out-of-date genstamps, the NN could accidentally
 * delete good replicas instead of the bad replicas.
 */
@Test(timeout=60000) public void testRWRInvalidation() throws Exception {
Configuration conf=new HdfsConfiguration();
// RandomDeleterPolicy makes replica-deletion choice nondeterministic, which
// is what exposed the HDFS-4799 bug.
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,RandomDeleterPolicy.class,BlockPlacementPolicy.class);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
List testPaths=Lists.newArrayList();
for (int i=0; i < 10; i++) {
testPaths.add(new Path("/test" + i));
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
List streams=Lists.newArrayList();
try {
// Write "old gs data" to every file with both datanodes up.
for ( Path path : testPaths) {
FSDataOutputStream out=cluster.getFileSystem().create(path,(short)2);
streams.add(out);
out.writeBytes("old gs data\n");
out.hflush();
}
// Stop one node so its replicas keep the old genstamp, then append
// "new gs data" (bumping the genstamp) on the surviving node only.
DataNodeProperties oldGenstampNode=cluster.stopDataNode(0);
for (int i=0; i < streams.size(); i++) {
Path path=testPaths.get(i);
FSDataOutputStream out=streams.get(i);
out.writeBytes("new gs data\n");
out.hflush();
cluster.getFileSystem().setReplication(path,(short)1);
out.close();
}
LOG.info("=========================== restarting cluster");
DataNodeProperties otherNode=cluster.stopDataNode(0);
cluster.restartNameNode();
// Bring the stale-genstamp node back first so its RWR replicas are
// reported before the up-to-date ones.
cluster.restartDataNode(oldGenstampNode);
cluster.waitActive();
cluster.restartDataNode(otherNode);
cluster.waitActive();
cluster.getNameNode().getNamesystem().getBlockManager().computeInvalidateWork(2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// The good (new-genstamp) replicas must have survived invalidation.
for ( Path path : testPaths) {
String ret=DFSTestUtil.readFile(cluster.getFileSystem(),path);
assertEquals("old gs data\n" + "new gs data\n",ret);
}
}
finally {
IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that chooseReplicaToDelete picks replicas based on block locality
 * (rack split) and remaining free space, and that adjustSetsWithChosenReplica
 * rebalances the first/second sets after each deletion.
 */
@Test public void testChooseReplicaToDelete() throws Exception {
List replicaList=new ArrayList();
final Map> rackMap=new HashMap>();
// Give the candidate datanodes strictly decreasing free space so the
// expected deletion order is deterministic.
dataNodes[0].setRemaining(4 * 1024 * 1024);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3 * 1024 * 1024);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2 * 1024 * 1024);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1 * 1024 * 1024);
replicaList.add(storages[5]);
// Mark every node freshly heartbeated so none is treated as stale.
for (int i=0; i < dataNodes.length; i++) {
dataNodes[i].setLastUpdate(Time.now());
}
// Partition replicas: "first" holds nodes on racks with >1 replica,
// "second" holds the rest.
List first=new ArrayList();
List second=new ArrayList();
replicator.splitNodesWithRack(replicaList,rackMap,first,second);
assertEquals(2,first.size());
assertEquals(2,second.size());
// With excess on a shared rack, the node with least free space there loses.
DatanodeStorageInfo chosen=replicator.chooseReplicaToDelete(null,null,(short)3,first,second);
assertEquals(chosen,storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen);
assertEquals(0,first.size());
assertEquals(3,second.size());
// Next deletion falls to the globally least-free node.
chosen=replicator.chooseReplicaToDelete(null,null,(short)2,first,second);
assertEquals(chosen,storages[5]);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a stale datanode (no heartbeat within staleInterval) is
 * avoided as a write target while stale-node avoidance is in effect.
 */
@Test public void testChooseTargetWithStaleNodes() throws Exception {
// Age dataNodes[0]'s heartbeat past the stale threshold and re-run the check.
dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
assertTrue(namenode.getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
DatanodeStorageInfo[] targets;
// The stale node must be skipped; the next storage is chosen instead.
targets=chooseTarget(1);
assertEquals(targets.length,1);
assertEquals(storages[1],targets[0]);
// With dataNodes[1] also excluded, the pick must avoid the stale node's rack.
Set excludedNodes=new HashSet();
excludedNodes.add(dataNodes[1]);
List chosenNodes=new ArrayList();
targets=chooseTarget(1,chosenNodes,excludedNodes);
assertEquals(targets.length,1);
assertFalse(isOnSameRack(targets[0],dataNodes[0]));
// Restore dataNodes[0]'s freshness so later tests see a clean state.
dataNodes[0].setLastUpdate(Time.now());
namenode.getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Verifies the stale-node avoidance threshold: with 2 of 6 nodes stale the
 * policy avoids them; once more than half (4 of 6) are stale, avoidance is
 * disabled so writes can still proceed; reviving nodes re-enables avoidance.
 */
@Test public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,true);
String[] hosts=new String[]{"host1","host2","host3","host4","host5","host6"};
String[] racks=new String[]{"/d1/r1","/d1/r1","/d1/r2","/d1/r2","/d2/r3","/d2/r3"};
MiniDFSCluster miniCluster=new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts).numDataNodes(hosts.length).build();
miniCluster.waitActive();
try {
// Make the first two nodes stale (heartbeats off + last-update aged).
for (int i=0; i < 2; i++) {
DataNode dn=miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,true);
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
}
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
int numStaleNodes=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getNumStaleNodes();
// FIX: assertEquals takes (expected, actual); the original reversed the
// arguments throughout this test, producing misleading failure messages.
assertEquals(2,numStaleNodes);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
DatanodeDescriptor staleNodeInfo=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
BlockPlacementPolicy replicator=miniCluster.getNameNode().getNamesystem().getBlockManager().getBlockPlacementPolicy();
DatanodeStorageInfo[] targets=replicator.chooseTarget(filename,3,staleNodeInfo,new ArrayList(),false,null,BLOCK_SIZE,StorageType.DEFAULT);
assertEquals(3,targets.length);
// Avoidance active: the stale writer's rack must be skipped.
assertFalse(isOnSameRack(targets[0],staleNodeInfo));
// Push past the half-stale threshold: 4 of 6 nodes stale.
for (int i=0; i < 4; i++) {
DataNode dn=miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,true);
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1);
}
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
numStaleNodes=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(4,numStaleNodes);
// More than half stale: avoidance is switched off.
assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
targets=replicator.chooseTarget(filename,3,staleNodeInfo,new ArrayList(),false,null,BLOCK_SIZE,StorageType.DEFAULT);
assertEquals(3,targets.length);
// With avoidance off, the writer's own rack is usable again.
assertTrue(isOnSameRack(targets[0],staleNodeInfo));
// Revive nodes 2 and 3, dropping back to 2 stale nodes.
for (int i=2; i < 4; i++) {
DataNode dn=miniCluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn,false);
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now());
}
miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getHeartbeatManager().heartbeatCheck();
numStaleNodes=miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getNumStaleNodes();
assertEquals(2,numStaleNodes);
assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
targets=chooseTarget(3,staleNodeInfo);
assertEquals(3,targets.length);
assertFalse(isOnSameRack(targets[0],staleNodeInfo));
}
finally {
miniCluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test for the high priority blocks are processed before the low priority
 * blocks.
 */
@Test(timeout=60000) public void testReplicationWithPriority() throws Exception {
int DFS_NAMENODE_REPLICATION_INTERVAL=1000;
int HIGH_PRIORITY=0;
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
try {
cluster.waitActive();
final UnderReplicatedBlocks neededReplications=cluster.getNameNode().getNamesystem().getBlockManager().neededReplications;
// Queue 100 low-priority blocks (2 of 3 replicas present).
for (int i=0; i < 100; i++) {
neededReplications.add(new Block(random.nextLong()),2,0,3);
}
// Let one replication-monitor cycle run on the backlog.
Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
// Add a single high-priority block (only 1 of 3 replicas present).
neededReplications.add(new Block(random.nextLong()),1,0,3);
Thread.sleep(DFS_NAMENODE_REPLICATION_INTERVAL);
// The high-priority queue must have been drained ahead of the 100
// low-priority entries.
assertFalse("Not able to clear the element from high priority list",neededReplications.iterator(HIGH_PRIORITY).hasNext());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests whether the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when a non-positive value is retrieved
 */
@Test public void testGetReplWorkMultiplier(){
Configuration conf=new Configuration();
// Default must be positive.
int blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
assertTrue(blocksReplWorkMultiplier > 0);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
// FIX: assertEquals takes (expected, actual); the original reversed them.
assertEquals(3,blocksReplWorkMultiplier);
// A non-positive configured value must be rejected.
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
exception.expect(IllegalArgumentException.class);
blocksReplWorkMultiplier=DFSUtil.getReplWorkMultiplier(conf);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
Set excludedNodes;
DatanodeStorageInfo[] targets;
List chosenNodes=new ArrayList();
excludedNodes=new HashSet();
excludedNodes.add(dataNodes[1]);
// Zero replicas requested: empty result.
targets=chooseTarget(0,chosenNodes,excludedNodes);
assertEquals(targets.length,0);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
// One replica: the writer (dataNodes[0]) itself.
targets=chooseTarget(1,chosenNodes,excludedNodes);
assertEquals(targets.length,1);
assertEquals(storages[0],targets[0]);
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
// Two replicas: writer plus one node on a different rack.
targets=chooseTarget(2,chosenNodes,excludedNodes);
assertEquals(targets.length,2);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
// Three replicas: 2nd and 3rd share a remote rack.
targets=chooseTarget(3,chosenNodes,excludedNodes);
assertEquals(targets.length,3);
assertEquals(storages[0],targets[0]);
assertFalse(isOnSameRack(targets[0],targets[1]));
assertTrue(isOnSameRack(targets[1],targets[2]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
// Four replicas: none on the writer's rack; remaining three span two racks.
targets=chooseTarget(4,chosenNodes,excludedNodes);
assertEquals(targets.length,4);
assertEquals(storages[0],targets[0]);
for (int i=1; i < 4; i++) {
assertFalse(isOnSameRack(targets[0],targets[i]));
}
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[1],targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
// returnChosenNodes=true: the already-chosen storage appears in the result.
targets=replicator.chooseTarget(filename,1,dataNodes[0],chosenNodes,true,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2,targets.length);
int i=0;
for (; i < targets.length && !storages[2].equals(targets[i]); i++) ;
assertTrue(i < targets.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * check the result.
 * @throws Exception
 */
@Test public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
// Make the first two nodes too full to be chosen (remaining space below the
// minimum needed for a write).
for (int i=0; i < 2; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,(HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,0L,0L,0L,0,0);
}
// Capture log output so the placement warning can be asserted on.
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
// Request every node; only NUM_OF_DATANODES - 2 can be satisfied.
DatanodeStorageInfo[] targets=chooseTarget(NUM_OF_DATANODES);
assertEquals(targets.length,NUM_OF_DATANODES - 2);
// The policy must have logged a WARN mentioning the 2-node shortfall.
final List log=appender.getLog();
assertNotNull(log);
assertFalse(log.size() == 0);
final LoggingEvent lastLogEntry=log.get(log.size() - 1);
assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
// Restore the two nodes' capacity for subsequent tests.
for (int i=0; i < 2; i++) {
updateHeartbeatWithUsage(dataNodes[i],2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,0L,0L,0L,0,0);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This testcase tests whether the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when 0.0f is retrieved
 */
@Test public void testGetInvalidateWorkPctPerIteration(){
Configuration conf=new Configuration();
// Default must be positive.
float blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertTrue(blocksInvalidateWorkPct > 0);
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"0.5f");
blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
// FIX: assertEquals takes (expected, actual, delta); the original reversed
// expected and actual, producing misleading failure messages.
assertEquals(0.5f,blocksInvalidateWorkPct,blocksInvalidateWorkPct * 1e-7);
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"1.0f");
blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
assertEquals(1.0f,blocksInvalidateWorkPct,blocksInvalidateWorkPct * 1e-7);
// 0.0f is invalid and must be rejected.
conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,"0.0f");
exception.expect(IllegalArgumentException.class);
blocksInvalidateWorkPct=DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test public void testChooseTargetWithDecomNodes() throws IOException {
namenode.getNamesystem().writeLock();
try {
String blockPoolId=namenode.getNamesystem().getBlockPoolId();
// Report xceiver counts of 2, 4 and 4 on nodes 3-5 via heartbeats.
dnManager.handleHeartbeat(dnrList.get(3),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),blockPoolId,dataNodes[3].getCacheCapacity(),dataNodes[3].getCacheRemaining(),2,0,0);
dnManager.handleHeartbeat(dnrList.get(4),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),blockPoolId,dataNodes[4].getCacheCapacity(),dataNodes[4].getCacheRemaining(),4,0,0);
dnManager.handleHeartbeat(dnrList.get(5),BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),blockPoolId,dataNodes[5].getCacheCapacity(),dataNodes[5].getCacheRemaining(),4,0,0);
final int load=2 + 4 + 4;
FSNamesystem fsn=namenode.getNamesystem();
// All 6 nodes in service: average is total load over 6.
assertEquals((double)load / 6,fsn.getInServiceXceiverAverage(),EPSILON);
// Decommission nodes 0-2; they must drop out of the average's denominator.
for (int i=0; i < 3; i++) {
DatanodeDescriptor d=dnManager.getDatanode(dnrList.get(i));
dnManager.startDecommission(d);
d.setDecommissioned();
}
assertEquals((double)load / 3,fsn.getInServiceXceiverAverage(),EPSILON);
// With considerLoad, only the in-service nodes (3-5) are eligible targets.
DatanodeStorageInfo[] targets=namenode.getNamesystem().getBlockManager().getBlockPlacementPolicy().chooseTarget("testFile.txt",3,dataNodes[0],new ArrayList(),false,null,1024,StorageType.DEFAULT);
assertEquals(3,targets.length);
Set targetSet=new HashSet(Arrays.asList(targets));
for (int i=3; i < storages.length; i++) {
assertTrue(targetSet.contains(storages[i]));
}
}
finally {
// Undo decommissioning and release the lock regardless of outcome.
dataNodes[0].stopDecommission();
dataNodes[1].stopDecommission();
dataNodes[2].stopDecommission();
namenode.getNamesystem().writeUnlock();
}
NameNode.LOG.info("Done working on it");
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that chooseReplicaToDelete picks replicas based on block locality
 * (rack/node-group split) and remaining free space, across a sequence of
 * three deletions with set rebalancing in between.
 */
@Test public void testChooseReplicaToDelete() throws Exception {
List replicaList=new ArrayList();
final Map> rackMap=new HashMap>();
// Candidate datanodes with strictly decreasing free space so the expected
// deletion order is deterministic.
dataNodes[0].setRemaining(4 * 1024 * 1024);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3 * 1024 * 1024);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2 * 1024 * 1024);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1 * 1024 * 1024);
replicaList.add(storages[5]);
// Partition replicas: "first" holds nodes on racks with >1 replica,
// "second" holds the rest.
List first=new ArrayList();
List second=new ArrayList();
replicator.splitNodesWithRack(replicaList,rackMap,first,second);
assertEquals(3,first.size());
assertEquals(1,second.size());
// Deletions proceed from the over-represented rack, least-free first
// (among eligible nodes): storages[1], then storages[2].
DatanodeStorageInfo chosen=replicator.chooseReplicaToDelete(null,null,(short)3,first,second);
assertEquals(chosen,storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen);
assertEquals(2,first.size());
assertEquals(1,second.size());
chosen=replicator.chooseReplicaToDelete(null,null,(short)2,first,second);
assertEquals(chosen,storages[2]);
replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen);
assertEquals(0,first.size());
assertEquals(2,second.size());
// Final deletion falls to the globally least-free node.
chosen=replicator.chooseReplicaToDelete(null,null,(short)1,first,second);
assertEquals(chosen,storages[5]);
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica but in different
 * node group, and the rest should be placed on a third rack.
 * @throws Exception
 */
@Test public void testChooseTarget2() throws Exception {
DatanodeStorageInfo[] targets;
BlockPlacementPolicyDefault repl=(BlockPlacementPolicyDefault)replicator;
List chosenNodes=new ArrayList();
Set excludedNodes=new HashSet();
excludedNodes.add(dataNodes[1]);
targets=repl.chooseTarget(filename,4,dataNodes[0],chosenNodes,false,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
assertEquals(targets.length,4);
assertEquals(storages[0],targets[0]);
assertTrue(cluster.isNodeGroupAware());
// No two targets may share the writer's node group.
for (int i=1; i < 4; i++) {
assertFalse(isOnSameNodeGroup(targets[0],targets[i]));
}
// The three remote targets span exactly two racks.
assertTrue(isOnSameRack(targets[1],targets[2]) || isOnSameRack(targets[2],targets[3]));
assertFalse(isOnSameRack(targets[1],targets[3]));
excludedNodes.clear();
chosenNodes.clear();
excludedNodes.add(dataNodes[1]);
chosenNodes.add(storages[2]);
// returnChosenNodes=true: the already-chosen storage appears in the result.
targets=repl.chooseTarget(filename,1,dataNodes[0],chosenNodes,true,excludedNodes,BLOCK_SIZE,StorageType.DEFAULT);
System.out.println("targets=" + Arrays.asList(targets));
assertEquals(2,targets.length);
int i=0;
for (; i < targets.length && !storages[2].equals(targets[i]); i++) ;
assertTrue(i < targets.length);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that adding blocks with different replication counts puts them
 * into different queues
 * @throws Throwable if something goes wrong
 */
@Test public void testBlockPriorities() throws Throwable {
UnderReplicatedBlocks queues=new UnderReplicatedBlocks();
Block block1=new Block(1);
Block block2=new Block(2);
Block block_very_under_replicated=new Block(3);
Block block_corrupt=new Block(4);
// 1 of 3 replicas -> highest priority queue.
assertAdded(queues,block1,1,0,3);
assertEquals(1,queues.getUnderReplicatedBlockCount());
assertEquals(1,queues.size());
assertInLevel(queues,block1,UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
// Re-adding an already-queued block must be rejected.
assertFalse(queues.add(block1,1,0,3));
// 2 of 3 replicas -> ordinary under-replicated queue.
assertAdded(queues,block2,2,0,3);
assertEquals(2,queues.getUnderReplicatedBlockCount());
assertEquals(2,queues.size());
assertInLevel(queues,block2,UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
// 0 replicas -> corrupt queue; counted in size but not as under-replicated.
assertAdded(queues,block_corrupt,0,0,3);
assertEquals(3,queues.size());
assertEquals(2,queues.getUnderReplicatedBlockCount());
assertEquals(1,queues.getCorruptBlockSize());
assertInLevel(queues,block_corrupt,UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
// 4 of 25 replicas -> very-under-replicated queue.
assertAdded(queues,block_very_under_replicated,4,0,25);
assertInLevel(queues,block_very_under_replicated,UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that {@link JspHelper#getUGI} builds the UGI from a delegation
 * token when one is supplied: the token's owner and real user take
 * precedence over any remote user or username parameter on the request,
 * while a doAs parameter that conflicts with the token's owner is rejected.
 */
@Test public void testGetUgiFromToken() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://localhost:4321/");
  ServletContext context=mock(ServletContext.class);
  String realUser="TheDoctor";
  String user="TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  Text ownerText=new Text(user);
  DelegationTokenIdentifier dtId=new DelegationTokenIdentifier(ownerText,ownerText,new Text(realUser));
  // Parameterized token type instead of a raw Token.
  Token<DelegationTokenIdentifier> token=
      new Token<DelegationTokenIdentifier>(dtId,new DummySecretManager(0,0,0,0));
  String tokenString=token.encodeToUrlString();
  // However the request identifies itself, the token decides the UGI.
  String[][] ignoredRequestUsers={
    {null,null},       // no remote user, no username parameter
    {realUser,null},   // remote user matching the token's real user
    {"rogue",null},    // remote user conflicting with the token
    {null,user}        // username parameter matching the token's owner
  };
  for (String[] users : ignoredRequestUsers) {
    request=getMockRequest(users[0],users[1],null);
    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
    ugi=JspHelper.getUGI(context,request,conf);
    Assert.assertNotNull(ugi.getRealUser());
    // assertEquals takes (expected, actual) in this order.
    Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
    Assert.assertEquals(user,ugi.getShortUserName());
    checkUgiFromToken(ugi);
  }
  // A doAs parameter that does not match the token's owner is an error,
  // whether or not a matching username parameter is also present.
  for (String usernameParam : new String[]{null,user}) {
    request=getMockRequest(null,usernameParam,"rogue");
    when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME)).thenReturn(tokenString);
    try {
      JspHelper.getUGI(context,request,conf);
      Assert.fail("bad request allowed");
    }
    catch ( IOException ioe) {
      Assert.assertEquals("Usernames not matched: name=rogue != expected=" + user,ioe.getMessage());
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies {@link JspHelper#getUGI} for non-proxy requests with security
 * enabled: unauthenticated requests are rejected, an authenticated remote
 * user yields a UGI with no real (proxy) user, and a username parameter
 * conflicting with the authenticated user is rejected.
 */
@Test public void testGetNonProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://localhost:4321/");
  ServletContext context=mock(ServletContext.class);
  String realUser="TheDoctor";
  String user="TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  // No remote user at all: the filter has not authenticated anyone.
  request=getMockRequest(null,null,null);
  try {
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad request allowed");
  }
  catch ( IOException ioe) {
    Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
  }
  // A username parameter alone is not authentication.
  request=getMockRequest(null,realUser,null);
  try {
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad request allowed");
  }
  catch ( IOException ioe) {
    Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
  }
  // Authenticated remote user: UGI is that user with no real (proxy) user.
  request=getMockRequest(realUser,null,null);
  ugi=JspHelper.getUGI(context,request,conf);
  Assert.assertNull(ugi.getRealUser());
  // assertEquals takes (expected, actual) in this order.
  Assert.assertEquals(realUser,ugi.getShortUserName());
  checkUgiFromAuth(ugi);
  // A username parameter matching the authenticated user is accepted.
  request=getMockRequest(realUser,realUser,null);
  ugi=JspHelper.getUGI(context,request,conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(realUser,ugi.getShortUserName());
  checkUgiFromAuth(ugi);
  // A conflicting username parameter must be rejected.
  request=getMockRequest(realUser,user,null);
  try {
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad request allowed");
  }
  catch ( IOException ioe) {
    Assert.assertEquals("Usernames not matched: name=" + user + " != expected="+ realUser,ioe.getMessage());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Round-trips every {@link HdfsServerConstants.ReplicaState} through its
 * write()/read() serialization and verifies the identical enum constant
 * comes back for each state.
 * Letting any failure propagate (instead of catching Exception and calling
 * fail(), which swallowed the cause) preserves the stack trace for diagnosis.
 * @throws Exception if serialization fails
 */
@Test public void testReadWriteReplicaState() throws Exception {
  DataOutputBuffer out=new DataOutputBuffer();
  DataInputBuffer in=new DataInputBuffer();
  for ( HdfsServerConstants.ReplicaState repState : HdfsServerConstants.ReplicaState.values()) {
    repState.write(out);
    in.reset(out.getData(),out.getLength());
    HdfsServerConstants.ReplicaState result=HdfsServerConstants.ReplicaState.read(in);
    // Enum constants are singletons, so reference equality is the right check.
    assertTrue("testReadWrite error !!!",repState == result);
    // Reset both buffers so each state is serialized in isolation.
    out.reset();
    in.reset();
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies {@link JspHelper#getUGI} for proxy-user (doAs) requests with
 * security enabled: the request must be authenticated by the filter, the
 * authenticated user must be allowed to impersonate the doAs user, and a
 * username parameter conflicting with the authenticated user is rejected.
 */
@Test public void testGetProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://localhost:4321/");
  ServletContext context=mock(ServletContext.class);
  String realUser="TheDoctor";
  String user="TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  // Grant realUser unrestricted impersonation rights for this test.
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(realUser),"*");
  conf.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(realUser),"*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  // doAs without an authenticated remote user must fail.
  request=getMockRequest(null,null,user);
  try {
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad request allowed");
  }
  catch ( IOException ioe) {
    Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
  }
  // A username parameter alone is still not authentication.
  request=getMockRequest(null,realUser,user);
  try {
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad request allowed");
  }
  catch ( IOException ioe) {
    Assert.assertEquals("Security enabled but user not authenticated by filter",ioe.getMessage());
  }
  // Authenticated superuser proxying for user: real user is the superuser.
  request=getMockRequest(realUser,null,user);
  ugi=JspHelper.getUGI(context,request,conf);
  Assert.assertNotNull(ugi.getRealUser());
  // assertEquals takes (expected, actual) in this order.
  Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user,ugi.getShortUserName());
  checkUgiFromAuth(ugi);
  // A username parameter matching the authenticated user is accepted.
  request=getMockRequest(realUser,realUser,user);
  ugi=JspHelper.getUGI(context,request,conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(realUser,ugi.getRealUser().getShortUserName());
  Assert.assertEquals(user,ugi.getShortUserName());
  checkUgiFromAuth(ugi);
  // A username parameter conflicting with the authenticated user fails.
  request=getMockRequest(realUser,user,user);
  try {
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad request allowed");
  }
  catch ( IOException ioe) {
    Assert.assertEquals("Usernames not matched: name=" + user + " != expected="+ realUser,ioe.getMessage());
  }
  // user holds no proxy privileges, so impersonating realUser is denied.
  try {
    request=getMockRequest(user,null,realUser);
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad proxy request allowed");
  }
  catch ( AuthorizationException ae) {
    Assert.assertEquals("User: " + user + " is not allowed to impersonate "+ realUser,ae.getMessage());
  }
  try {
    request=getMockRequest(user,user,realUser);
    JspHelper.getUGI(context,request,conf);
    Assert.fail("bad proxy request allowed");
  }
  catch ( AuthorizationException ae) {
    Assert.assertEquals("User: " + user + " is not allowed to impersonate "+ realUser,ae.getMessage());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Restarts the cluster with a large (6-chunk) block size, writes a
 * 12-chunk file, and while it is being replicated forces a block report;
 * two blocks must then show up as pending replication on the NameNode.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_09() throws IOException {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1=DN_N0 + 1;
  final int bytesChkSum=1024 * 1000;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum);
  shutDownCluster();
  startUpCluster();
  try {
    writeFile(METHOD_NAME,12 * bytesChkSum,filePath);
    Block bl=findBlock(filePath,12 * bytesChkSum);
    BlockChecker bc=new BlockChecker(filePath);
    bc.start();
    // Wait until the second DN holds a TEMPORARY replica before reporting.
    waitForTempReplica(bl,DN_N1);
    DataNode dn=cluster.getDataNodes().get(DN_N1);
    String poolId=cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports=getBlockReports(dn,poolId,true,true);
    sendBlockReports(dnR,poolId,reports);
    printStats();
    assertEquals("Wrong number of PendingReplication blocks",2,cluster.getNamesystem().getPendingReplicationBlocks());
    try {
      bc.join();
    }
    catch ( InterruptedException e) {
      // Restore the interrupt status instead of silently swallowing it.
      Thread.currentThread().interrupt();
    }
  }
  finally {
    resetConfiguration();
  }
}
InternalCallVerifier ConditionMatcher
/**
 * Similar to BlockReport_03() but works with two DNs.
 * Test writes a file and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is completed test finds a block from
 * the second DN and sets its GS to be < of original one.
 * This is the markBlockAsCorrupt case 3, so we expect one pending deletion.
 * Block report is forced and the check for # of corrupted blocks is performed.
 * Another block is chosen and its length is set to a lesser than original.
 * A check for another corrupted block is performed after yet another
 * BlockReport.
 * @throws Exception in case of an error
 */
@Test(timeout=300000) public void blockReport_07() throws Exception {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1=DN_N0 + 1;
  writeFile(METHOD_NAME,FILE_SIZE,filePath);
  startDNandWait(filePath,true);
  DataNode dn=cluster.getDataNodes().get(DN_N1);
  String poolId=cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
  // First report: a block with a corrupted GS -> one pending deletion.
  StorageBlockReport[] reports=getBlockReports(dn,poolId,true,false);
  sendBlockReports(dnR,poolId,reports);
  printStats();
  assertThat("Wrong number of corrupt blocks",cluster.getNamesystem().getCorruptReplicaBlocks(),is(0L));
  assertThat("Wrong number of PendingDeletion blocks",cluster.getNamesystem().getPendingDeletionBlocks(),is(1L));
  assertThat("Wrong number of PendingReplication blocks",cluster.getNamesystem().getPendingReplicationBlocks(),is(0L));
  // Second report: a block with a corrupted length -> one corrupt replica.
  // (The redundant duplicate printStats() call after the final asserts was removed.)
  reports=getBlockReports(dn,poolId,false,true);
  sendBlockReports(dnR,poolId,reports);
  printStats();
  assertThat("Wrong number of corrupt blocks",cluster.getNamesystem().getCorruptReplicaBlocks(),is(1L));
  assertThat("Wrong number of PendingDeletion blocks",cluster.getNamesystem().getPendingDeletionBlocks(),is(1L));
  assertThat("Wrong number of PendingReplication blocks",cluster.getNamesystem().getPendingReplicationBlocks(),is(0L));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The test set the configuration parameters for a large block size and
 * restarts initiated single-node cluster.
 * Then it writes a file > block_size and closes it.
 * The second datanode is started in the cluster.
 * As soon as the replication process is started and at least one TEMPORARY
 * replica is found test forces BlockReport process and checks
 * if the TEMPORARY replica isn't reported on it.
 * Eventually, the configuration is being restored into the original state.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_08() throws IOException {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1=DN_N0 + 1;
  final int bytesChkSum=1024 * 1000;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum);
  shutDownCluster();
  startUpCluster();
  try {
    ArrayList blocks=writeFile(METHOD_NAME,12 * bytesChkSum,filePath);
    Block bl=findBlock(filePath,12 * bytesChkSum);
    BlockChecker bc=new BlockChecker(filePath);
    bc.start();
    // Wait for a TEMPORARY replica to appear on the second DN.
    waitForTempReplica(bl,DN_N1);
    DataNode dn=cluster.getDataNodes().get(DN_N1);
    String poolId=cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false);
    sendBlockReports(dnR,poolId,reports);
    printStats();
    // The TEMPORARY replica is not reported, so every block stays pending.
    assertEquals("Wrong number of PendingReplication blocks",blocks.size(),cluster.getNamesystem().getPendingReplicationBlocks());
    try {
      bc.join();
    }
    catch ( InterruptedException e) {
      // Restore the interrupt status instead of silently swallowing it.
      Thread.currentThread().interrupt();
    }
  }
  finally {
    resetConfiguration();
  }
}
InternalCallVerifier ConditionMatcher
/**
 * Writes and closes a file, then registers an extra RBW replica the
 * NameNode never allocated and forces a block report; the report must
 * produce exactly one pending-deletion block and no corrupt ones.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_04() throws IOException {
  final String methodName=GenericTestUtils.getMethodName();
  Path testFile=new Path("/" + methodName + ".dat");
  DFSTestUtil.createFile(fs,testFile,FILE_SIZE,REPL_FACTOR,rand.nextLong());
  // Create an RBW replica for a block the NameNode knows nothing about.
  DataNode dataNode=cluster.getDataNodes().get(DN_N0);
  String bpId=cluster.getNamesystem().getBlockPoolId();
  ExtendedBlock phantomBlock=new ExtendedBlock(bpId,rand.nextLong(),1024L,rand.nextLong());
  dataNode.getFSDataset().createRbw(StorageType.DEFAULT,phantomBlock);
  // Force a block report that now includes the unknown replica.
  DatanodeRegistration registration=dataNode.getDNRegistrationForBP(bpId);
  StorageBlockReport[] storageReports=getBlockReports(dataNode,bpId,false,false);
  sendBlockReports(registration,bpId,storageReports);
  printStats();
  assertThat("Wrong number of corrupt blocks",cluster.getNamesystem().getCorruptReplicaBlocks(),is(0L));
  assertThat("Wrong number of PendingDeletion blocks",cluster.getNamesystem().getPendingDeletionBlocks(),is(1L));
}
InternalCallVerifier EqualityVerifier
/**
 * Creates and closes a file, starts a second datanode, waits for
 * replication to complete, then sends a block report and verifies that
 * no under-replicated blocks remain.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_06() throws Exception {
  final String methodName=GenericTestUtils.getMethodName();
  Path testFile=new Path("/" + methodName + ".dat");
  final int secondDnIndex=DN_N0 + 1;
  writeFile(methodName,FILE_SIZE,testFile);
  startDNandWait(testFile,true);
  // Report from the freshly started second datanode.
  DataNode secondDn=cluster.getDataNodes().get(secondDnIndex);
  String bpId=cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration registration=secondDn.getDNRegistrationForBP(bpId);
  StorageBlockReport[] storageReports=getBlockReports(secondDn,bpId,false,false);
  sendBlockReports(registration,bpId,storageReports);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",0,cluster.getNamesystem().getUnderReplicatedBlocks());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test write a file, verifies and closes it. Then a couple of random blocks
 * is removed and BlockReport is forced; the FSNamesystem is pushed to
 * recalculate required DN's activities such as replications and so on.
 * The number of missing and under-replicated blocks should be the same in
 * case of a single-DN cluster.
 * @throws IOException in case of errors
 */
@Test(timeout=300000) public void blockReport_02() throws IOException {
final String METHOD_NAME=GenericTestUtils.getMethodName();
LOG.info("Running test " + METHOD_NAME);
Path filePath=new Path("/" + METHOD_NAME + ".dat");
DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
File dataDir=new File(cluster.getDataDirectory());
assertTrue(dataDir.isDirectory());
List blocks2Remove=new ArrayList();
List removedIndex=new ArrayList();
List lBlocks=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_START,FILE_SIZE).getLocatedBlocks();
// Draw two distinct random block indices to delete.
// NOTE(review): this loop never terminates if the file has fewer than two
// blocks — presumably FILE_SIZE guarantees more; confirm.
while (removedIndex.size() != 2) {
int newRemoveIndex=rand.nextInt(lBlocks.size());
if (!removedIndex.contains(newRemoveIndex)) removedIndex.add(newRemoveIndex);
}
for ( Integer aRemovedIndex : removedIndex) {
blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Number of blocks allocated " + lBlocks.size());
}
final DataNode dn0=cluster.getDataNodes().get(DN_N0);
// Remove the chosen blocks from the single datanode: unfinalize each block
// in the dataset, then delete the matching replica file(s) on disk.
for ( ExtendedBlock b : blocks2Remove) {
if (LOG.isDebugEnabled()) {
LOG.debug("Removing the block " + b.getBlockName());
}
for ( File f : findAllFiles(dataDir,new MyFileFilter(b.getBlockName(),true))) {
DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
if (!f.delete()) {
LOG.warn("Couldn't delete " + b.getBlockName());
}
else {
LOG.debug("Deleted file " + f.toString());
}
}
}
// Allow the datanode's rescan interval (plus slack) to elapse so the
// removals are noticed before the report is built.
waitTil(DN_RESCAN_EXTRA_WAIT);
String poolId=cluster.getNamesystem().getBlockPoolId();
DatanodeRegistration dnR=dn0.getDNRegistrationForBP(poolId);
StorageBlockReport[] reports=getBlockReports(dn0,poolId,false,false);
sendBlockReports(dnR,poolId,reports);
// Push the NameNode to recompute datanode work (replication scheduling).
BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem().getBlockManager());
printStats();
// With a single DN, every removed block is both missing and
// under-replicated, so both counters must equal blocks2Remove.size().
assertEquals("Wrong number of MissingBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getMissingBlocksCount());
assertEquals("Wrong number of UnderReplicatedBlocks is found",blocks2Remove.size(),cluster.getNamesystem().getUnderReplicatedBlocks());
}
InternalCallVerifier ConditionMatcher
/**
 * Writes and closes a file, then forces a block report in which one block
 * carries a bad generation stamp; the NameNode must count exactly one
 * corrupt replica and no pending deletions.
 * @throws IOException in case of an error
 */
@Test(timeout=300000) public void blockReport_03() throws IOException {
  final String methodName=GenericTestUtils.getMethodName();
  Path testFile=new Path("/" + methodName + ".dat");
  writeFile(methodName,FILE_SIZE,testFile);
  DataNode dataNode=cluster.getDataNodes().get(DN_N0);
  String bpId=cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration registration=dataNode.getDNRegistrationForBP(bpId);
  // Request a report containing one block with a corrupted generation stamp.
  StorageBlockReport[] storageReports=getBlockReports(dataNode,bpId,true,false);
  sendBlockReports(registration,bpId,storageReports);
  printStats();
  assertThat("Wrong number of corrupt blocks",cluster.getNamesystem().getCorruptReplicaBlocks(),is(1L));
  assertThat("Wrong number of PendingDeletion blocks",cluster.getNamesystem().getPendingDeletionBlocks(),is(0L));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Writes, verifies and closes a file, then overwrites the length recorded
 * in every block object before forcing a block report. The bogus lengths
 * must be ignored: blocks fetched afterwards keep their original sizes.
 * @throws java.io.IOException on an error
 */
@Test(timeout=300000) public void blockReport_01() throws IOException {
  final String methodName=GenericTestUtils.getMethodName();
  Path testFile=new Path("/" + methodName + ".dat");
  ArrayList blocks=prepareForRide(testFile,methodName,FILE_SIZE);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of blocks allocated " + blocks.size());
  }
  long[] originalLengths=new long[blocks.size()];
  for (int idx=0; idx < blocks.size(); idx++) {
    Block blk=blocks.get(idx);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block " + blk.getBlockName() + " before\t"+ "Size "+ blk.getNumBytes());
    }
    originalLengths[idx]=blk.getNumBytes();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Setting new length");
    }
    // Corrupt only the length; keep block id and generation stamp intact.
    int bogusLength=rand.nextInt(BLOCK_SIZE);
    blk.set(blk.getBlockId(),bogusLength,blk.getGenerationStamp());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block " + blk.getBlockName() + " after\t "+ "Size "+ blk.getNumBytes());
    }
  }
  DataNode dataNode=cluster.getDataNodes().get(DN_N0);
  String bpId=cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration registration=dataNode.getDNRegistrationForBP(bpId);
  StorageBlockReport[] storageReports=getBlockReports(dataNode,bpId,false,false);
  sendBlockReports(registration,bpId,storageReports);
  // Re-read the block list and check every length survived unchanged.
  List blocksAfterReport=DFSTestUtil.getAllBlocks(fs.open(testFile));
  if (LOG.isDebugEnabled()) {
    LOG.debug("After mods: Number of blocks allocated " + blocksAfterReport.size());
  }
  for (int idx=0; idx < blocksAfterReport.size(); idx++) {
    ExtendedBlock reported=blocksAfterReport.get(idx).getBlock();
    assertEquals("Length of " + idx + "th block is incorrect",originalLengths[idx],reported.getNumBytes());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test for the case where one of the DNs in the pipeline is in the
 * process of doing a block report exactly when the block is closed.
 * In this case, the block report becomes delayed until after the
 * block is marked completed on the NN, and hence it reports an RBW
 * replica for a COMPLETE block. Such a report should not be marked
 * corrupt.
 * This is a regression test for HDFS-2791.
 */
@Test(timeout=300000) public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception {
final CountDownLatch brFinished=new CountDownLatch(1);
// DelayAnswer that additionally counts down the latch once the delayed
// block report has actually been delivered (even if it threw).
DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG){
@Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
try {
return super.passThrough(invocation);
}
finally {
brFinished.countDown();
}
}
}
;
final String METHOD_NAME=GenericTestUtils.getMethodName();
Path filePath=new Path("/" + METHOD_NAME + ".dat");
// NOTE(review): mutates the shared REPL_FACTOR field; presumably restored
// by the test fixture between cases — confirm.
REPL_FACTOR=2;
startDNandWait(null,false);
NameNode nn=cluster.getNameNode();
FSDataOutputStream out=fs.create(filePath,REPL_FACTOR);
try {
// Write and hflush so the DNs hold an RBW replica of the still-open block.
AppendTestUtil.write(out,0,10);
out.hflush();
// Spy on DN0's client-side view of the NameNode and hold back its
// blockReport RPC via the delayer.
DataNode dn=cluster.getDataNodes().get(0);
DatanodeProtocolClientSideTranslatorPB spy=DataNodeTestUtils.spyOnBposToNN(dn,nn);
Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.anyObject(),Mockito.anyString(),Mockito.anyObject());
dn.scheduleAllBlockReport(0);
delayer.waitForCall();
}
finally {
// Closing the stream completes the block on the NN while the report is
// still being delayed — the exact race this test reproduces.
IOUtils.closeStream(out);
}
// Release the delayed report and wait until it has been processed.
delayer.proceed();
brFinished.await();
BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager());
// The late RBW report for a now-COMPLETE block must not be counted corrupt.
assertEquals(0,nn.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs,filePath);
// The file must stay readable even with one of the two DNs stopped.
cluster.stopDataNode(1);
DFSTestUtil.readFile(fs,filePath);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test datanode block pool initialization error handling.
 * Failure in initializing a block pool should not cause NPE: the first
 * initBlockPool call throws, subsequent calls succeed, and the offer
 * service must still initialize and report blocks to both namenodes.
 */
@Test public void testBPInitErrorHandling() throws Exception {
  final DataNode mockDn=Mockito.mock(DataNode.class);
  Mockito.doReturn(true).when(mockDn).shouldRun();
  Configuration conf=new Configuration();
  File dnDataDir=new File(new File(TEST_BUILD_DATA,"testBPInitErrorHandling"),"data");
  conf.set(DFS_DATANODE_DATA_DIR_KEY,dnDataDir.toURI().toString());
  Mockito.doReturn(conf).when(mockDn).getConf();
  Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf();
  Mockito.doReturn(DataNodeMetrics.create(conf,"fake dn")).when(mockDn).getMetrics();
  final AtomicInteger count=new AtomicInteger();
  // Fail the first initBlockPool invocation, succeed on every later one.
  // Parameterized Answer<Void> replaces the raw Answer type.
  Mockito.doAnswer(new Answer<Void>(){
    @Override public Void answer( InvocationOnMock invocation) throws Throwable {
      if (count.getAndIncrement() == 0) {
        throw new IOException("faked initBlockPool exception");
      }
      // From the second call onwards behave normally and expose the dataset.
      Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset();
      return null;
    }
  }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
  BPOfferService bpos=setupBPOSForNNs(mockDn,mockNN1,mockNN2);
  List actors=bpos.getBPServiceActors();
  assertEquals(2,actors.size());
  bpos.start();
  try {
    // Despite the injected first failure, initialization must complete and
    // block reports must reach both namenodes without an NPE.
    waitForInitialization(bpos);
    waitForBlockReport(mockNN1,mockNN2);
  }
  finally {
    bpos.stop();
  }
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier
/**
 * The DataNode must determine the active NameNode purely from the
 * HA status carried in heartbeat responses (HDFS-2627); with no
 * NameNode claiming active, the answer is null.
 */
@Test public void testPickActiveNameNode() throws Exception {
  BPOfferService offerService=setupBPOSForNNs(mockNN1,mockNN2);
  offerService.start();
  try {
    waitForInitialization(offerService);
    // Nobody has claimed active yet.
    assertNull(offerService.getActiveNN());
    // NN1 claims active at txid 1.
    mockHaStatuses[0]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,1);
    offerService.triggerHeartbeatForTests();
    assertSame(mockNN1,offerService.getActiveNN());
    // NN2 claims active at txid 2 and takes over.
    mockHaStatuses[1]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,2);
    offerService.triggerHeartbeatForTests();
    assertSame(mockNN2,offerService.getActiveNN());
    // The choice stays stable across further heartbeats.
    offerService.triggerHeartbeatForTests();
    assertSame(mockNN2,offerService.getActiveNN());
    // NN2 steps down to standby: no active NameNode remains.
    mockHaStatuses[1]=new NNHAStatusHeartbeat(HAServiceState.STANDBY,2);
    offerService.triggerHeartbeatForTests();
    assertNull(offerService.getActiveNN());
    // NN1 becomes active again at txid 3.
    mockHaStatuses[0]=new NNHAStatusHeartbeat(HAServiceState.ACTIVE,3);
    offerService.triggerHeartbeatForTests();
    assertSame(mockNN1,offerService.getActiveNN());
  }
  finally {
    offerService.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises the basic dual-NameNode workflow of BPOfferService: it must
 * register with both NameNodes, deliver block reports to both, and fan
 * out received-block notifications to both.
 */
@Test public void testBasicFunctionality() throws Exception {
  BPOfferService offerService=setupBPOSForNNs(mockNN1,mockNN2);
  offerService.start();
  try {
    waitForInitialization(offerService);
    // Registration must have happened against both NameNodes.
    Mockito.verify(mockNN1).registerDatanode(Mockito.any(DatanodeRegistration.class));
    Mockito.verify(mockNN2).registerDatanode(Mockito.any(DatanodeRegistration.class));
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);
    // A received-block notification must reach both NameNodes.
    offerService.notifyNamenodeReceivedBlock(FAKE_BLOCK,"","");
    ReceivedDeletedBlockInfo[] received=waitForBlockReceived(FAKE_BLOCK,mockNN1);
    assertEquals(1,received.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(),received[0].getBlock());
    received=waitForBlockReceived(FAKE_BLOCK,mockNN2);
    assertEquals(1,received.length);
    assertEquals(FAKE_BLOCK.getLocalBlock(),received[0].getBlock());
  }
  finally {
    offerService.stop();
  }
}
InternalCallVerifier ConditionMatcher
/**
 * Verify NameNode behavior when a given DN reports multiple replicas
 * of a given block (one per storage): the NameNode must still resolve
 * each block to one location per distinct datanode.
 */
@Test public void testBlockHasMultipleReplicasOnSameDN() throws IOException {
  String filename=makeFileName(GenericTestUtils.getMethodName());
  Path filePath=new Path(filename);
  DFSTestUtil.createFile(fs,filePath,BLOCK_SIZE,BLOCK_SIZE * NUM_BLOCKS,BLOCK_SIZE,NUM_DATANODES,seed);
  LocatedBlocks locatedBlocks=client.getLocatedBlocks(filePath.toString(),0,BLOCK_SIZE * NUM_BLOCKS);
  DataNode dn=cluster.getDataNodes().get(0);
  DatanodeRegistration dnReg=dn.getDNRegistrationForBP(bpid);
  // Hoisted out of the loop; also replaces the C-style "reports[]" declaration.
  final int storagesPerDn=cluster.getStoragesPerDatanode();
  StorageBlockReport[] reports=new StorageBlockReport[storagesPerDn];
  ArrayList blocks=new ArrayList();
  for ( LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    blocks.add(locatedBlock.getBlock().getLocalBlock());
  }
  // Claim every block on every storage of the first DN.
  for (int i=0; i < storagesPerDn; ++i) {
    BlockListAsLongs bll=new BlockListAsLongs(blocks,null);
    FsVolumeSpi v=dn.getFSDataset().getVolumes().get(i);
    DatanodeStorage dns=new DatanodeStorage(v.getStorageID());
    reports[i]=new StorageBlockReport(dns,bll.getBlockListAsLongs());
  }
  cluster.getNameNodeRpc().blockReport(dnReg,bpid,reports);
  // Each block must still resolve to NUM_DATANODES distinct locations.
  locatedBlocks=client.getLocatedBlocks(filename,0,BLOCK_SIZE * NUM_BLOCKS);
  for ( LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    DatanodeInfo[] locations=locatedBlock.getLocations();
    assertThat(locations.length,is((int)NUM_DATANODES));
    assertThat(locations[0].getDatanodeUuid(),not(locations[1].getDatanodeUuid()));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end test of the block replacement (move) protocol: a block can
 * only be moved via a proxy that has it, to a destination that does not
 * already hold it, and an invalid delete-hint must not prevent a valid move.
 * @throws Exception in case of errors
 */
@Test public void testBlockReplacement() throws Exception {
  final Configuration CONF=new HdfsConfiguration();
  final String[] INITIAL_RACKS={"/RACK0","/RACK1","/RACK2"};
  final String[] NEW_RACKS={"/RACK2"};
  final short REPLICATION_FACTOR=(short)3;
  final int DEFAULT_BLOCK_SIZE=1024;
  final Random r=new Random();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE);
  CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,DEFAULT_BLOCK_SIZE / 2);
  CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,500);
  cluster=new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).racks(INITIAL_RACKS).build();
  try {
    cluster.waitActive();
    FileSystem fs=cluster.getFileSystem();
    Path fileName=new Path("/tmp.txt");
    DFSTestUtil.createFile(fs,fileName,DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,r.nextLong());
    DFSTestUtil.waitReplication(fs,fileName,REPLICATION_FACTOR);
    InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
    DFSClient client=new DFSClient(addr,CONF);
    List locatedBlocks=client.getNamenode().getBlockLocations("/tmp.txt",0,DEFAULT_BLOCK_SIZE).getLocatedBlocks();
    assertEquals(1,locatedBlocks.size());
    LocatedBlock block=locatedBlocks.get(0);
    DatanodeInfo[] oldNodes=block.getLocations();
    // assertEquals takes (expected, actual) in this order.
    assertEquals(3,oldNodes.length);
    ExtendedBlock b=block.getBlock();
    // Bring up a fourth DN on RACK2 that holds no replica of the block.
    cluster.startDataNodes(CONF,1,true,null,NEW_RACKS);
    cluster.waitActive();
    DatanodeInfo[] datanodes=client.datanodeReport(DatanodeReportType.ALL);
    // Find the DN that is not among the block's original locations.
    DatanodeInfo newNode=null;
    for ( DatanodeInfo node : datanodes) {
      // Primitive boolean instead of the pointlessly boxed Boolean.
      boolean isNewNode=true;
      for ( DatanodeInfo oldNode : oldNodes) {
        if (node.equals(oldNode)) {
          isNewNode=false;
          break;
        }
      }
      if (isNewNode) {
        newNode=node;
        break;
      }
    }
    assertTrue(newNode != null);
    // Split the remaining nodes: the one sharing the new node's rack is the
    // replacement source; the other two serve as copy proxies.
    DatanodeInfo source=null;
    ArrayList proxies=new ArrayList(2);
    for ( DatanodeInfo node : datanodes) {
      if (node != newNode) {
        if (node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
          source=node;
        }
        else {
          proxies.add(node);
        }
      }
    }
    assertTrue(source != null && proxies.size() == 2);
    LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block "+ b);
    assertFalse(replaceBlock(b,source,newNode,proxies.get(0)));
    LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block "+ b);
    assertFalse(replaceBlock(b,source,proxies.get(0),proxies.get(1)));
    LOG.info("Testcase 3: Source=" + source + " Proxy="+ proxies.get(0)+ " Destination="+ newNode);
    assertTrue(replaceBlock(b,source,proxies.get(0),newNode));
    checkBlocks(new DatanodeInfo[]{newNode,proxies.get(0),proxies.get(1)},fileName.toString(),DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,client);
    LOG.info("Testcase 4: invalid del hint " + proxies.get(0));
    assertTrue(replaceBlock(b,proxies.get(0),proxies.get(1),source));
    checkBlocks(proxies.toArray(new DatanodeInfo[proxies.size()]),fileName.toString(),DEFAULT_BLOCK_SIZE,REPLICATION_FACTOR,client);
  }
  finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Writes then reads a file with cache-drop-behind requested on both
 * operations, and checks the tracker recorded drops on the block file
 * for each phase.
 */
@Test(timeout=120000) public void testFadviseAfterWriteThenRead() throws Exception {
  LOG.info("testFadviseAfterWriteThenRead");
  tracker.clear();
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster dfsCluster=null;
  String testPath="/test";
  int testLen=MAX_TEST_FILE_LEN;
  try {
    dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    dfsCluster.waitActive();
    FileSystem fs=dfsCluster.getFileSystem();
    // Write with drop-behind requested: drops must be tracked.
    createHdfsFile(fs,new Path(testPath),testLen,true);
    ExtendedBlock block=dfsCluster.getNameNode().getRpcServer().getBlockLocations(testPath,0,Long.MAX_VALUE).get(0).getBlock();
    String fadvisedFileName=MiniDFSCluster.getBlockFile(0,block).getName();
    Stats stats=tracker.getStats(fadvisedFileName);
    stats.assertDroppedInRange(0,testLen - WRITE_PACKET_SIZE);
    stats.clear();
    // Read with drop-behind requested: drops must be tracked again.
    readHdfsFile(fs,new Path(testPath),Long.MAX_VALUE,true);
    Assert.assertNotNull(stats);
    stats.assertDroppedInRange(0,testLen - WRITE_PACKET_SIZE);
  }
  finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Writes then reads a file without requesting cache-drop-behind on either
 * operation, and checks the tracker never recorded anything for the
 * block file.
 */
@Test(timeout=120000) public void testNoFadviseAfterWriteThenRead() throws Exception {
  LOG.info("testNoFadviseAfterWriteThenRead");
  tracker.clear();
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster dfsCluster=null;
  String testPath="/test";
  int testLen=MAX_TEST_FILE_LEN;
  try {
    dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    dfsCluster.waitActive();
    FileSystem fs=dfsCluster.getFileSystem();
    // Drop-behind disabled on write: nothing should be tracked.
    createHdfsFile(fs,new Path(testPath),testLen,false);
    ExtendedBlock block=dfsCluster.getNameNode().getRpcServer().getBlockLocations(testPath,0,Long.MAX_VALUE).get(0).getBlock();
    String fadvisedFileName=MiniDFSCluster.getBlockFile(0,block).getName();
    Stats stats=tracker.getStats(fadvisedFileName);
    Assert.assertNull(stats);
    // Drop-behind disabled on read as well: still nothing tracked.
    readHdfsFile(fs,new Path(testPath),Long.MAX_VALUE,false);
    Assert.assertNull(stats);
  }
  finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Test the scenario where the DataNode defaults to not dropping the cache,
 * but our client defaults are set: the client-side settings must win, so
 * cache drops are recorded on both the write and the read path.
 */
@Test(timeout=120000) public void testClientDefaults() throws Exception {
  LOG.info("testClientDefaults");
  tracker.clear();
  Configuration conf=new HdfsConfiguration();
  // DataNode-side drop-behind off; client-side drop-behind on.
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,false);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,false);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS,true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES,true);
  MiniDFSCluster dfsCluster=null;
  String testPath="/test";
  int testLen=MAX_TEST_FILE_LEN;
  try {
    dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    dfsCluster.waitActive();
    FileSystem fs=dfsCluster.getFileSystem();
    // null drop-behind argument: presumably falls back to the configured
    // client defaults set above — matches the scenario in the javadoc.
    createHdfsFile(fs,new Path(testPath),testLen,null);
    ExtendedBlock block=dfsCluster.getNameNode().getRpcServer().getBlockLocations(testPath,0,Long.MAX_VALUE).get(0).getBlock();
    String fadvisedFileName=MiniDFSCluster.getBlockFile(0,block).getName();
    Stats stats=tracker.getStats(fadvisedFileName);
    stats.assertDroppedInRange(0,testLen - WRITE_PACKET_SIZE);
    stats.clear();
    readHdfsFile(fs,new Path(testPath),Long.MAX_VALUE,null);
    Assert.assertNotNull(stats);
    stats.assertDroppedInRange(0,testLen - WRITE_PACKET_SIZE);
  }
  finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier ConditionMatcher HybridVerifier
/**
 * Parsing of DFS_DATANODE_DATA_DIR_KEY: storage-type prefixes are matched
 * case-insensitively, unknown storage types are rejected with
 * IllegalArgumentException, and untagged paths default to DISK.
 */
@Test(timeout=30000) public void testDataDirParsing() throws Throwable {
Configuration conf=new Configuration();
List locations;
File[] dirs={new File("/dir0"),new File("/dir1"),new File("/dir2"),new File("/dir3")};
// Mixed-case storage-type tags must all parse to the proper type.
conf.set(DFS_DATANODE_DATA_DIR_KEY,"[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3");
locations=DataNode.getStorageLocations(conf);
assertThat(locations.size(),is(4));
StorageType[] expectedTypes={StorageType.DISK,StorageType.DISK,StorageType.SSD,StorageType.DISK};
for (int idx=0; idx < 4; idx++) {
assertThat(locations.get(idx).getStorageType(),is(expectedTypes[idx]));
assertThat(locations.get(idx).getUri(),is(dirs[idx].toURI()));
}
// An unrecognized storage type must be rejected outright.
conf.set(DFS_DATANODE_DATA_DIR_KEY,"[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2");
try {
locations=DataNode.getStorageLocations(conf);
fail();
}
catch ( IllegalArgumentException iae) {
DataNode.LOG.info("The exception is expected.",iae);
}
// Paths without any tag default to DISK storage.
conf.set(DFS_DATANODE_DATA_DIR_KEY,"/dir0,/dir1");
locations=DataNode.getStorageLocations(conf);
assertThat(locations.size(),is(2));
for (int idx=0; idx < 2; idx++) {
assertThat(locations.get(idx).getStorageType(),is(StorageType.DISK));
assertThat(locations.get(idx).getUri(),is(dirs[idx].toURI()));
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test BPService thread exit: stopping one of two block-pool service
 * threads leaves the DataNode up; stopping the remaining one brings the
 * DataNode down.
 */
@Test public void testBPServiceExit() throws Exception {
DataNode dataNode=cluster.getDataNodes().get(0);
// One BP service stopped: the DN must survive on the remaining one.
stopBPServiceThreads(1,dataNode);
assertTrue("DataNode should not exit",dataNode.isDatanodeUp());
// All BP services stopped: the DN must shut itself down.
stopBPServiceThreads(2,dataNode);
assertFalse("DataNode should exit",dataNode.isDatanodeUp());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that the DataNodeInfo MXBean exposes the same values as the
 * corresponding live DataNode getters (cluster id, version, ports,
 * namenode addresses, volume info and xceiver count).
 */
@Test public void testDataNodeMXBean() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
List datanodes=cluster.getDataNodes();
Assert.assertEquals(datanodes.size(),1);
DataNode datanode=datanodes.get(0);
// Look up the DN's MXBean on the platform MBean server.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
// Each JMX attribute must mirror the corresponding DataNode getter.
String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId");
Assert.assertEquals(datanode.getClusterId(),clusterId);
String version=(String)mbs.getAttribute(mxbeanName,"Version");
Assert.assertEquals(datanode.getVersion(),version);
String rpcPort=(String)mbs.getAttribute(mxbeanName,"RpcPort");
Assert.assertEquals(datanode.getRpcPort(),rpcPort);
String httpPort=(String)mbs.getAttribute(mxbeanName,"HttpPort");
Assert.assertEquals(datanode.getHttpPort(),httpPort);
String namenodeAddresses=(String)mbs.getAttribute(mxbeanName,"NamenodeAddresses");
Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
// Digits are normalized via replaceDigits before comparison — presumably
// because VolumeInfo contains values that change at runtime; TODO confirm.
String volumeInfo=(String)mbs.getAttribute(mxbeanName,"VolumeInfo");
Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),replaceDigits(volumeInfo));
int xceiverCount=(Integer)mbs.getAttribute(mxbeanName,"XceiverCount");
Assert.assertEquals(datanode.getXceiverCount(),xceiverCount);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Write a file one byte longer than Integer.MAX_VALUE against a simulated
 * dataset and check that the BytesWritten counter matches the file length
 * (i.e. the counter is tracked as a 64-bit value).
 */
@Test public void testDataNodeMetrics() throws Exception {
Configuration config=new HdfsConfiguration();
// Simulated dataset avoids actually materializing >2GB on disk.
SimulatedFSDataset.setFactory(config);
MiniDFSCluster dfsCluster=new MiniDFSCluster.Builder(config).build();
try {
FileSystem fileSys=dfsCluster.getFileSystem();
// One byte past Integer.MAX_VALUE exercises the long counter path.
final long LONG_FILE_LEN=Integer.MAX_VALUE + 1L;
DFSTestUtil.createFile(fileSys,new Path("/tmp.txt"),LONG_FILE_LEN,(short)1,1L);
List dataNodes=dfsCluster.getDataNodes();
assertEquals(dataNodes.size(),1);
DataNode dataNode=dataNodes.get(0);
MetricsRecordBuilder builder=getMetrics(dataNode.getMetrics().name());
assertCounter("BytesWritten",LONG_FILE_LEN,builder);
}
finally {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Tests that round-trip acks in a datanode write pipeline are correctly
 * measured: the head DN of the pipeline must publish a non-zero
 * PacketAckRoundTripTimeNanos op count and its quantile gauges.
 */
@Test public void testRoundTripAckMetric() throws Exception {
final int datanodeCount=2;
final int interval=1;
Configuration conf=new HdfsConfiguration();
// Enable percentile metrics with a 1-second rollover window.
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,"" + interval);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path testFile=new Path("/testRoundTripAckMetric.txt");
FSDataOutputStream fsout=fs.create(testFile,(short)datanodeCount);
DFSOutputStream dout=(DFSOutputStream)fsout.getWrappedStream();
// Slow the writer so the pipeline stays open long enough to observe.
dout.setChunksPerPacket(5);
dout.setArtificialSlowdown(3000);
fsout.write(new byte[10000]);
// Poll for up to ~5 seconds for the write pipeline to be established.
DatanodeInfo[] pipeline=null;
int count=0;
while (pipeline == null && count < 5) {
pipeline=dout.getPipeline();
System.out.println("Waiting for pipeline to be created.");
Thread.sleep(1000);
count++;
}
// BUG FIX: fail with a clear message instead of an NPE at pipeline[0]
// if the pipeline never appeared within the polling window.
assertNotNull("Write pipeline was not created within 5 seconds",pipeline);
DatanodeInfo headInfo=pipeline[0];
DataNode headNode=null;
for ( DataNode datanode : cluster.getDataNodes()) {
if (datanode.getDatanodeId().equals(headInfo)) {
headNode=datanode;
break;
}
}
assertNotNull("Could not find the head of the datanode write pipeline",headNode);
// Let the percentile window roll over so the quantile gauges publish.
Thread.sleep((interval + 1) * 1000);
MetricsRecordBuilder dnMetrics=getMetrics(headNode.getMetrics().name());
assertTrue("Expected non-zero number of acks",getLongCounter("PacketAckRoundTripTimeNanosNumOps",dnMetrics) > 0);
assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s",dnMetrics);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Verify SendDataPacket metrics: after creating and reading back a tiny
 * file, the transfer and blocked-on-network op counters are published,
 * and their quantile gauges appear once the percentile interval passes.
 */
@Test public void testSendDataPacketMetrics() throws Exception {
Configuration conf=new HdfsConfiguration();
// 1-second percentile rollover window.
final int interval=1;
conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,"" + interval);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs=cluster.getFileSystem();
Path tmpfile=new Path("/tmp.txt");
// One-byte file; read it back to generate packet traffic.
DFSTestUtil.createFile(fs,tmpfile,(long)1,(short)1,1L);
DFSTestUtil.readFile(fs,tmpfile);
List datanodes=cluster.getDataNodes();
assertEquals(datanodes.size(),1);
DataNode datanode=datanodes.get(0);
MetricsRecordBuilder rb=getMetrics(datanode.getMetrics().name());
// NOTE(review): the expected op count of 2 is assumed to come from the
// data packet plus the trailing empty packet — confirm against
// BlockSender packet accounting.
assertCounter("SendDataPacketTransferNanosNumOps",(long)2,rb);
assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps",(long)2,rb);
// Sleep past the percentile interval so the quantile gauges publish.
Thread.sleep((interval + 1) * 1000);
String sec=interval + "s";
assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec,rb);
assertQuantileGauges("SendDataPacketTransferNanos" + sec,rb);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * HA pair sharing cluster id "cluster-1": after both NNs are reformatted
 * under a new cluster id ("cluster-2") and restarted, a DN restarted with
 * its old storage must fail its only block-pool service and shut down.
 */
@Test public void testDNWithInvalidStorageWithHA() throws Exception {
MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1")).addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1")));
top.setFederation(true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build();
try {
cluster.startDataNodes(conf,1,true,null,null);
// Give the DN time to register with the HA pair.
Thread.sleep(10000);
DataNode dn=cluster.getDataNodes().get(0);
assertTrue("Datanode should be running",dn.isDatanodeUp());
assertEquals("BPOfferService should be running",1,dn.getAllBpOs().length);
DataNodeProperties dnProp=cluster.stopDataNode(0);
cluster.getNameNode(0).stop();
cluster.getNameNode(1).stop();
Configuration nn1=cluster.getConfiguration(0);
Configuration nn2=cluster.getConfiguration(1);
// Reformat nn0 under a different cluster id, then copy its name dirs to
// nn1 so both NNs now advertise "cluster-2".
StartupOption.FORMAT.setClusterId("cluster-2");
DFSTestUtil.formatNameNode(nn1);
MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),FSNamesystem.getNamespaceDirs(nn2),nn2);
cluster.restartNameNode(0,false);
cluster.restartNameNode(1,false);
// The DN still carries "cluster-1" storage, so its sole BP service fails.
cluster.restartDataNode(dnProp);
Thread.sleep(10000);
dn=cluster.getDataNodes().get(0);
assertFalse("Datanode should have shutdown as only service failed",dn.isDatanodeUp());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * starts single nn and single dn and verifies registration and handshake:
 * the DN's single BPOfferService must point at the NN, carry the NN's
 * block pool id and cluster id, and be removed on cluster shutdown.
 * @throws IOException
 */
@Test public void testFedSingleNN() throws IOException {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nameNodePort(9927).build();
try {
NameNode nn1=cluster.getNameNode();
assertNotNull("cannot create nn1",nn1);
String bpid1=FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String cid1=FSImageTestUtil.getFSImage(nn1).getClusterID();
int lv1=FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress());
DataNode dn=cluster.getDataNodes().get(0);
final Map volInfos=dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0);
int i=0;
for ( Map.Entry e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue());
}
assertEquals("number of volumes is wrong",2,volInfos.size());
for ( BPOfferService bpos : dn.getAllBpOs()) {
// BUG FIX: the original concatenated "reg: bpid=" directly with
// "; name=", dropping the block pool id from the log message.
LOG.info("reg: bpid=" + bpos.getBlockPoolId() + "; name=" + bpos.bpRegistration + "; sid="+ bpos.bpRegistration.getDatanodeUuid()+ "; nna="+ getNNSocketAddress(bpos));
}
BPOfferService bpos1=dn.getAllBpOs()[0];
bpos1.triggerBlockReportForTests();
assertEquals("wrong nn address",getNNSocketAddress(bpos1),nn1.getNameNodeAddress());
assertEquals("wrong bpid",bpos1.getBlockPoolId(),bpid1);
assertEquals("wrong cid",dn.getClusterId(),cid1);
cluster.shutdown();
// After shutdown every BPOfferService must be gone.
assertEquals(0,dn.getAllBpOs().length);
// Prevent a second shutdown in the finally block.
cluster=null;
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * MiniDFSCluster.addNameNode: adding a NN succeeds on federated clusters
 * (starting from both 2 and 1 NNs) and is rejected with "cannot add
 * namenode" on a non-federated cluster.
 */
@Test public void testMiniDFSClusterWithMultipleNN() throws IOException {
Configuration conf=new HdfsConfiguration();
// Scenario 1: federated cluster with 2 NNs; adding a third must work.
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
Assert.assertEquals("(1)Should be 2 namenodes",2,cluster.getNumNameNodes());
cluster.addNameNode(conf,0);
Assert.assertEquals("(1)Should be 3 namenodes",3,cluster.getNumNameNodes());
}
catch ( IOException ioe) {
Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
}
finally {
cluster.shutdown();
}
// Scenario 2: federated cluster with 1 NN; adding a second must work.
conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1)).build();
try {
Assert.assertNotNull(cluster);
cluster.waitActive();
Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes());
cluster.addNameNode(conf,0);
Assert.assertEquals("(2)Should be 2 namenodes",2,cluster.getNumNameNodes());
}
catch ( IOException ioe) {
Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
}
finally {
cluster.shutdown();
}
// Scenario 3: non-federated cluster; addNameNode must be rejected.
conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
Assert.assertNotNull(cluster);
// NOTE(review): this message says "(2)" although it belongs to scenario 3
// (the catch below uses "(3)") — looks like a copy/paste slip.
Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes());
cluster.addNameNode(conf,9929);
Assert.fail("shouldn't be able to add another NN to non federated cluster");
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().startsWith("cannot add namenode"));
Assert.assertEquals("(3)Should be 1 namenodes",1,cluster.getNumNameNodes());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Two nameservices where one ("ns2") advertises a mismatched cluster id:
 * a freshly started DN must stay up but end with only one running
 * BPOfferService, for the matching nameservice.
 */
@Test(timeout=20000) public void testClusterIdMismatchAtStartupWithHA() throws Exception {
MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0")).addNN(new MiniDFSNNTopology.NNConf("nn1"))).addNameservice(new MiniDFSNNTopology.NSConf("ns2").addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid")).addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid")));
topology.setFederation(true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
try {
cluster.startDataNodes(conf,1,true,null,null);
// Give the DN time to attempt registration with both nameservices.
Thread.sleep(10000);
DataNode dataNode=cluster.getDataNodes().get(0);
assertTrue("Datanode should be running",dataNode.isDatanodeUp());
assertEquals("Only one BPOfferService should be running",1,dataNode.getAllBpOs().length);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * start multiple NNs and single DN and verifies per BP registrations and
 * handshakes: each BPOfferService must point at its own NN with that NN's
 * block pool id and namespace id, while the cluster id is shared.
 * @throws IOException
 */
@Test public void test2NNRegistration() throws IOException {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
assertNotNull("cannot create nn1",nn1);
assertNotNull("cannot create nn2",nn2);
String bpid1=FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
String bpid2=FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
String cid1=FSImageTestUtil.getFSImage(nn1).getClusterID();
String cid2=FSImageTestUtil.getFSImage(nn2).getClusterID();
int lv1=FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
int lv2=FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
int ns1=FSImageTestUtil.getFSImage(nn1).getNamespaceID();
int ns2=FSImageTestUtil.getFSImage(nn2).getNamespaceID();
// Federated NNs have distinct namespace ids (but share a cluster id —
// asserted further below).
assertNotSame("namespace ids should be different",ns1,ns2);
LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress());
LOG.info("nn2: lv=" + lv2 + ";cid="+ cid2+ ";bpid="+ bpid2+ ";uri="+ nn2.getNameNodeAddress());
DataNode dn=cluster.getDataNodes().get(0);
final Map volInfos=dn.data.getVolumeInfoMap();
Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0);
int i=0;
for ( Map.Entry e : volInfos.entrySet()) {
LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue());
}
assertEquals("number of volumes is wrong",2,volInfos.size());
for ( BPOfferService bpos : dn.getAllBpOs()) {
LOG.info("BP: " + bpos);
}
// getAllBpOs() order is not fixed; swap so bpos1 corresponds to nn1.
BPOfferService bpos1=dn.getAllBpOs()[0];
BPOfferService bpos2=dn.getAllBpOs()[1];
if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
BPOfferService tmp=bpos1;
bpos1=bpos2;
bpos2=tmp;
}
assertEquals("wrong nn address",getNNSocketAddress(bpos1),nn1.getNameNodeAddress());
assertEquals("wrong nn address",getNNSocketAddress(bpos2),nn2.getNameNodeAddress());
assertEquals("wrong bpid",bpos1.getBlockPoolId(),bpid1);
assertEquals("wrong bpid",bpos2.getBlockPoolId(),bpid2);
assertEquals("wrong cid",dn.getClusterId(),cid1);
assertEquals("cid should be same",cid2,cid1);
// Namespace info received in the handshake must match each NN's image.
assertEquals("namespace should be same",bpos1.bpNSInfo.namespaceID,ns1);
assertEquals("namespace should be same",bpos2.bpNSInfo.namespaceID,ns2);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * DN behavior as NNs are added at runtime: it registers with each NN in
 * the existing federation, but refuses to register with a NN formatted
 * under a different cluster id.
 */
@Test public void testClusterIdMismatch() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
DataNode dn=cluster.getDataNodes().get(0);
BPOfferService[] bposs=dn.getAllBpOs();
LOG.info("dn bpos len (should be 2):" + bposs.length);
Assert.assertEquals("should've registered with two namenodes",bposs.length,2);
// Add a third NN under the same cluster id: the DN must pick it up.
cluster.addNameNode(conf,9938);
Thread.sleep(500);
bposs=dn.getAllBpOs();
LOG.info("dn bpos len (should be 3):" + bposs.length);
Assert.assertEquals("should've registered with three namenodes",bposs.length,3);
// Format the fourth NN under a mismatched cluster id.
StartupOption.FORMAT.setClusterId("DifferentCID");
cluster.addNameNode(conf,9948);
NameNode nn4=cluster.getNameNode(3);
assertNotNull("cannot create nn4",nn4);
Thread.sleep(500);
bposs=dn.getAllBpOs();
LOG.info("dn bpos len (still should be 3):" + bposs.length);
// The DN must NOT have registered with the mismatched NN.
Assert.assertEquals("should've registered with three namenodes",3,bposs.length);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Writes three files through three concurrent clients and verifies that
 * the DN's number of peers matches its number of peer xceiver threads,
 * both while the streams are open and after they are closed (i.e. no
 * leaked xceivers).
 */
@Test(timeout=600000) public void testDatanodePeersXceiver() throws Exception {
try {
startCluster();
String testFile1="/TestDataNodeXceiver1.dat";
String testFile2="/TestDataNodeXceiver2.dat";
String testFile3="/TestDataNodeXceiver3.dat";
DFSClient client1=new DFSClient(NameNode.getAddress(conf),conf);
DFSClient client2=new DFSClient(NameNode.getAddress(conf),conf);
DFSClient client3=new DFSClient(NameNode.getAddress(conf),conf);
DFSOutputStream s1=(DFSOutputStream)client1.create(testFile1,true);
DFSOutputStream s2=(DFSOutputStream)client2.create(testFile2,true);
DFSOutputStream s3=(DFSOutputStream)client3.create(testFile3,true);
byte[] toWrite=new byte[1024 * 1024 * 8];
// Fixed seed keeps the written data reproducible.
Random rb=new Random(1111);
rb.nextBytes(toWrite);
s1.write(toWrite,0,1024 * 1024 * 8);
s1.flush();
s2.write(toWrite,0,1024 * 1024 * 8);
s2.flush();
s3.write(toWrite,0,1024 * 1024 * 8);
s3.flush();
// BUG FIX: the original compared getNumPeersXceiver() with itself,
// which is vacuously true. The intended invariant is one xceiver
// thread per connected peer.
assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer().getNumPeersXceiver());
s1.close();
s2.close();
s3.close();
// After closing the streams the invariant must still hold (no leaks).
assertTrue(dn.getXferServer().getNumPeers() == dn.getXferServer().getNumPeersXceiver());
client1.close();
client2.close();
client3.close();
}
finally {
shutdownCluster();
}
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Rolling upgrade with rollback: a file deleted during the upgrade has its
 * block moved to the DN trash, and rolling back restores the block so the
 * original file contents are readable again.
 */
@Test(timeout=600000) public void testDatanodeRollingUpgradeWithRollback() throws Exception {
try {
startCluster();
Path testFile1=new Path("/TestDataNodeRollingUpgrade1.dat");
DFSTestUtil.createFile(fs,testFile1,FILE_SIZE,REPL_FACTOR,SEED);
String fileContents1=DFSTestUtil.readFile(fs,testFile1);
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
startRollingUpgrade();
File blockFile=getBlockForFile(testFile1,true);
File trashFile=getTrashFileForBlock(blockFile,false);
// During a rolling upgrade, deleting a file moves its block to trash.
deleteAndEnsureInTrash(testFile1,blockFile,trashFile);
rollbackRollingUpgrade();
// Rollback must move the trashed block back to its original location.
ensureTrashRestored(blockFile,trashFile);
assert (fs.exists(testFile1));
// Contents after rollback must match the original write.
String fileContents2=DFSTestUtil.readFile(fs,testFile1);
assertThat(fileContents1,is(fileContents2));
}
finally {
shutdownCluster();
}
}
InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that individual volume failures do not cause DNs to fail, that
 * all volumes failed on a single datanode do cause it to fail, and
 * that the capacities and liveliness is adjusted correctly in the NN.
 * Volume failures are simulated by chmod-ing data dirs non-executable,
 * hence the skip on Windows.
 */
@Test public void testSuccessiveVolumeFailures() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
Thread.sleep(WAIT_FOR_HEARTBEATS);
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
final long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
// Each DN has two data dirs named "data" + (2*dnIndex + 1|2).
File dn1Vol1=new File(dataDir,"data" + (2 * 0 + 1));
File dn2Vol1=new File(dataDir,"data" + (2 * 1 + 1));
File dn3Vol1=new File(dataDir,"data" + (2 * 2 + 1));
File dn3Vol2=new File(dataDir,"data" + (2 * 2 + 2));
// Phase 1: fail one volume each on DN1 and DN2; all DNs must stay up.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,false));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,false));
Path file1=new Path("/test1");
DFSTestUtil.createFile(fs,file1,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file1,(short)3);
ArrayList dns=cluster.getDataNodes();
assertTrue("DN1 should be up",dns.get(0).isDatanodeUp());
assertTrue("DN2 should be up",dns.get(1).isDatanodeUp());
assertTrue("DN3 should be up",dns.get(2).isDatanodeUp());
assertCounter("VolumeFailures",1L,getMetrics(dns.get(0).getMetrics().name()));
assertCounter("VolumeFailures",1L,getMetrics(dns.get(1).getMetrics().name()));
assertCounter("VolumeFailures",0L,getMetrics(dns.get(2).getMetrics().name()));
// Sanity: the status wait must comfortably exceed the DN death wait.
assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
DFSTestUtil.waitForDatanodeStatus(dm,3,0,2,origCapacity - (1 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Phase 2: fail one volume on DN3 as well; DN3 must still stay up.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,false));
Path file2=new Path("/test2");
DFSTestUtil.createFile(fs,file2,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file2,(short)3);
assertTrue("DN3 should still be up",dns.get(2).isDatanodeUp());
assertCounter("VolumeFailures",1L,getMetrics(dns.get(2).getMetrics().name()));
ArrayList live=new ArrayList();
ArrayList dead=new ArrayList();
dm.fetchDatanodes(live,dead,false);
live.clear();
dead.clear();
// Fetch twice so the reports reflect the latest heartbeat state.
dm.fetchDatanodes(live,dead,false);
assertEquals("DN3 should have 1 failed volume",1,live.get(2).getVolumeFailures());
dnCapacity=DFSTestUtil.getDatanodeCapacity(dm,0);
DFSTestUtil.waitForDatanodeStatus(dm,3,0,3,origCapacity - (3 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Phase 3: fail DN3's last remaining volume; DN3 itself must now die.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,false));
Path file3=new Path("/test3");
DFSTestUtil.createFile(fs,file3,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file3,(short)2);
DFSTestUtil.waitForDatanodeDeath(dns.get(2));
assertCounter("VolumeFailures",2L,getMetrics(dns.get(2).getMetrics().name()));
DFSTestUtil.waitForDatanodeStatus(dm,2,1,2,origCapacity - (4 * dnCapacity),WAIT_FOR_HEARTBEATS);
// Phase 4: restore all volumes and restart; full capacity must return.
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn1Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn2Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol1,true));
assertTrue("Couldn't chmod local vol",FileUtil.setExecutable(dn3Vol2,true));
cluster.restartDataNodes();
cluster.waitActive();
Path file4=new Path("/test4");
DFSTestUtil.createFile(fs,file4,1024,(short)3,1L);
DFSTestUtil.waitReplication(fs,file4,(short)3);
DFSTestUtil.waitForDatanodeStatus(dm,3,0,0,origCapacity,WAIT_FOR_HEARTBEATS);
}
InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Test that a volume that is considered failed at startup is reported to
 * the NN as a failed volume while the block-pool service stays alive and
 * capacity is halved.
 */
@Test public void testFailedVolumeOnStartupIsCounted() throws Exception {
// Failure is simulated via chmod, which does not work on Windows.
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
final DatanodeManager manager=cluster.getNamesystem().getBlockManager().getDatanodeManager();
long capacityBefore=DFSTestUtil.getLiveDatanodeCapacity(manager);
File currentDir=new File(cluster.getInstanceStorageDir(0,0),"current");
try {
// Break the storage dir, then restart the DN so it fails at boot.
prepareDirToFail(currentDir);
restartDatanodes(1,false);
assertEquals(true,cluster.getDataNodes().get(0).isBPServiceAlive(cluster.getNamesystem().getBlockPoolId()));
// One live DN, zero dead, one failed volume, half the prior capacity.
DFSTestUtil.waitForDatanodeStatus(manager,1,0,1,capacityBefore / 2,WAIT_FOR_HEARTBEATS);
}
finally {
// Restore permissions so later cleanup can remove the directory.
FileUtil.chmod(currentDir.toString(),"755");
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, ie the DN tolerates a failed-to-use scenario during
 * its start-up: with tolerance 1 it comes up using only the good dir.
 */
@Test public void testValidVolumesAtStartup() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
cluster.shutdownDataNodes();
// Allow exactly one failed volume.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,1);
File tld=new File(MiniDFSCluster.getBaseDirectory(),"badData");
// dataDir1 is usable; dataDir2 is made to fail before the DN starts.
File dataDir1=new File(tld,"data1");
File dataDir1Actual=new File(dataDir1,"1");
dataDir1Actual.mkdirs();
File dataDir2=new File(tld,"data2");
prepareDirToFail(dataDir2);
File dataDir2Actual=new File(dataDir2,"2");
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
cluster.startDataNodes(conf,1,false,null,null);
cluster.waitActive();
try {
assertTrue("The DN should have started up fine.",cluster.isDataNodeUp());
DataNode dn=cluster.getDataNodes().get(0);
// The storage info must list only the good directory.
String si=DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
assertTrue("The DN should have started with this directory",si.contains(dataDir1Actual.getPath()));
assertFalse("The DN shouldn't have a bad directory.",si.contains(dataDir2Actual.getPath()));
}
finally {
cluster.shutdownDataNodes();
// Restore permissions so the test directory can be cleaned up.
FileUtil.chmod(dataDir2.toString(),"755");
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * recoverTransitionRead over locations that all fail must raise an
 * IOException and leave zero storage directories registered.
 */
@Test public void testRecoverTransitionReadFailure() throws IOException {
final int locationCount=3;
// true => create the locations in a non-existent/unusable state.
List badLocations=createStorageLocations(locationCount,true);
try {
storage.recoverTransitionRead(mockDN,nsInfo,badLocations,START_OPT);
fail("An IOException should throw: all StorageLocations are NON_EXISTENT");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e);
}
// No usable location was found, so none may have been registered.
assertEquals(0,storage.getNumStorageDirs());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * This test enforces the behavior that if there is an exception from
 * doTransition() during DN starts up, the storage directories that have
 * already been processed are still visible, i.e., in
 * DataStorage.storageDirs().
 */
@Test public void testRecoverTransitionReadDoTransitionFailure() throws IOException {
final int numLocations=3;
List locations=createStorageLocations(numLocations);
String bpid=nsInfo.getBlockPoolID();
// First pass succeeds and lays down storage under the current cluster id.
storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
storage.unlockAll();
// Fresh DataStorage plus a changed cluster id makes doTransition() fail.
storage=new DataStorage();
nsInfo.clusterID="cluster1";
try {
storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT);
fail("Expect to throw an exception from doTransition()");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Incompatible clusterIDs",e);
}
// Already-processed directories must still be registered despite the failure.
assertEquals(numLocations,storage.getNumStorageDirs());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * addStorageLocations: adding the same locations for several namespaces
 * keeps one storage dir per location; re-adding already-active locations
 * fails without changing the count; a fresh set of locations is accepted.
 */
@Test public void testAddStorageDirectories() throws IOException, URISyntaxException {
final int numLocations=3;
final int numNamespace=3;
List locations=createStorageLocations(numLocations);
List namespaceInfos=createNamespaceInfos(numNamespace);
for ( NamespaceInfo ni : namespaceInfos) {
storage.addStorageLocations(mockDN,ni,locations,START_OPT);
// Each location must contain both the storage dir and the BP subdir.
for ( StorageLocation sl : locations) {
checkDir(sl.getFile());
checkDir(sl.getFile(),ni.getBlockPoolID());
}
}
assertEquals(numLocations,storage.getNumStorageDirs());
// Re-adding equivalent (already active) locations must fail ...
locations=createStorageLocations(numLocations);
try {
storage.addStorageLocations(mockDN,namespaceInfos.get(0),locations,START_OPT);
fail("Expected to throw IOException: adding active directories.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e);
}
// ... and must not change the number of registered storage dirs.
assertEquals(numLocations,storage.getNumStorageDirs());
locations=createStorageLocations(6);
storage.addStorageLocations(mockDN,nsInfo,locations,START_OPT);
assertEquals(6,storage.getNumStorageDirs());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * NN software-version handshake: the default (build) version matches; a
 * NN above the DN's configured minimum is accepted; a NN below the
 * minimum causes retrieveNamespaceInfo() to throw
 * IncorrectVersionException.
 */
@Test public void testSoftwareVersionDifferences() throws Exception {
// Default mocks: NN reports the local build version, which matches.
assertEquals(VersionInfo.getVersion(),actor.retrieveNamespaceInfo().getSoftwareVersion());
// NN 4.0.0 with minimum 3.0.0: accepted.
doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
assertEquals("4.0.0",actor.retrieveNamespaceInfo().getSoftwareVersion());
// NN 3.0.0 with minimum 4.0.0: must be rejected.
doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
try {
actor.retrieveNamespaceInfo();
fail("Should have thrown an exception for NN with too-low version");
}
catch ( IncorrectVersionException ive) {
GenericTestUtils.assertExceptionContains("The reported NameNode version is too low",ive);
LOG.info("Got expected exception",ive);
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * dfsadmin -deleteBlockPool: deleting a no-longer-served block pool that
 * still has block files fails without "force", succeeds with "force",
 * and leaves the other (still served) block pool untouched.
 */
@Test public void testDfsAdminDeleteBlockPool() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1,namesServerId2");
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs1=cluster.getFileSystem(0);
FileSystem fs2=cluster.getFileSystem(1);
// Put one block from each namespace onto the single DN.
DFSTestUtil.createFile(fs1,new Path("/alpha"),1024,(short)1,54);
DFSTestUtil.createFile(fs2,new Path("/beta"),1024,(short)1,54);
DataNode dn1=cluster.getDataNodes().get(0);
String bpid1=cluster.getNamesystem(0).getBlockPoolId();
String bpid2=cluster.getNamesystem(1).getBlockPoolId();
File dn1StorageDir1=cluster.getInstanceStorageDir(0,0);
File dn1StorageDir2=cluster.getInstanceStorageDir(0,1);
// Drop nameservice 2 from the DN so bpid2 is no longer being served.
Configuration nn1Conf=cluster.getConfiguration(0);
nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1,dn1.getAllBpOs().length);
DFSAdmin admin=new DFSAdmin(nn1Conf);
String dn1Address=dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
// Without "force" the delete must fail: block files still exist.
String[] args={"-deleteBlockPool",dn1Address,bpid2};
int ret=admin.run(args);
assertFalse(0 == ret);
verifyBlockPoolDirectories(true,dn1StorageDir1,bpid2);
verifyBlockPoolDirectories(true,dn1StorageDir2,bpid2);
// With "force" the delete must succeed and remove only bpid2.
String[] forceArgs={"-deleteBlockPool",dn1Address,bpid2,"force"};
ret=admin.run(forceArgs);
assertEquals(0,ret);
verifyBlockPoolDirectories(false,dn1StorageDir1,bpid2);
verifyBlockPoolDirectories(false,dn1StorageDir2,bpid2);
verifyBlockPoolDirectories(true,dn1StorageDir1,bpid1);
verifyBlockPoolDirectories(true,dn1StorageDir2,bpid1);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Exercise DataNode#deleteBlockPool directly in a two-nameservice
 * federation: deletion must be refused for a pool that is still running,
 * refused without "force" while block files remain, and succeed otherwise
 * while leaving the other pool's directories intact.
 */
@Test public void testDeleteBlockPool() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
// Two federated nameservices, two DataNodes, one file per namespace.
conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1,namesServerId2");
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(2).build();
cluster.waitActive();
FileSystem fs1=cluster.getFileSystem(0);
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.createFile(fs1,new Path("/alpha"),1024,(short)2,54);
DFSTestUtil.createFile(fs2,new Path("/beta"),1024,(short)2,54);
DataNode dn1=cluster.getDataNodes().get(0);
DataNode dn2=cluster.getDataNodes().get(1);
String bpid1=cluster.getNamesystem(0).getBlockPoolId();
String bpid2=cluster.getNamesystem(1).getBlockPoolId();
File dn1StorageDir1=cluster.getInstanceStorageDir(0,0);
File dn1StorageDir2=cluster.getInstanceStorageDir(0,1);
File dn2StorageDir1=cluster.getInstanceStorageDir(1,0);
File dn2StorageDir2=cluster.getInstanceStorageDir(1,1);
// A pool that is still being served must never be deletable.
try {
dn1.deleteBlockPool(bpid1,true);
fail("Must not delete a running block pool");
}
catch ( IOException expected) {
}
// Reconfigure dn1 to serve only nameservice 2, stopping bpid1 on dn1.
Configuration nn1Conf=cluster.getConfiguration(1);
nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId2");
dn1.refreshNamenodes(nn1Conf);
assertEquals(1,dn1.getAllBpOs().length);
// Without force, deletion must fail while block files still exist.
try {
dn1.deleteBlockPool(bpid1,false);
fail("Must not delete if any block files exist unless " + "force is true");
}
catch ( IOException expected) {
}
verifyBlockPoolDirectories(true,dn1StorageDir1,bpid1);
verifyBlockPoolDirectories(true,dn1StorageDir2,bpid1);
// With force, deletion succeeds even though block files exist.
dn1.deleteBlockPool(bpid1,true);
verifyBlockPoolDirectories(false,dn1StorageDir1,bpid1);
verifyBlockPoolDirectories(false,dn1StorageDir2,bpid1);
fs1.delete(new Path("/alpha"),true);
// Poll until these finalized dirs on dn2 contain no block files; block
// deletion after removing /alpha propagates asynchronously.
File finalDir1=MiniDFSCluster.getFinalizedDir(dn2StorageDir1,bpid1);
File finalDir2=MiniDFSCluster.getFinalizedDir(dn2StorageDir1,bpid2);
while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1)) || (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
try {
Thread.sleep(3000);
}
catch ( Exception ignored) {
}
}
cluster.shutdownNameNode(0);
// Even with its NameNode down, a pool dn2 still serves is not deletable.
try {
dn2.deleteBlockPool(bpid1,true);
fail("Must not delete a running block pool");
}
catch ( IOException expected) {
}
dn2.refreshNamenodes(nn1Conf);
assertEquals(1,dn2.getAllBpOs().length);
verifyBlockPoolDirectories(true,dn2StorageDir1,bpid1);
verifyBlockPoolDirectories(true,dn2StorageDir2,bpid1);
// Non-force delete works now that no block files remain for bpid1 on dn2.
dn2.deleteBlockPool(bpid1,false);
verifyBlockPoolDirectories(false,dn2StorageDir1,bpid1);
verifyBlockPoolDirectories(false,dn2StorageDir2,bpid1);
// bpid2's directories must be untouched on both DataNodes.
verifyBlockPoolDirectories(true,dn1StorageDir1,bpid2);
verifyBlockPoolDirectories(true,dn1StorageDir2,bpid2);
verifyBlockPoolDirectories(true,dn2StorageDir1,bpid2);
verifyBlockPoolDirectories(true,dn2StorageDir2,bpid2);
// Sanity check: the surviving namespace is still fully functional.
Path gammaFile=new Path("/gamma");
DFSTestUtil.createFile(fs2,gammaFile,1024,(short)1,55);
fs2.setReplication(gammaFile,(short)2);
DFSTestUtil.waitReplication(fs2,gammaFile,(short)2);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test that when there is a failure replicating a block the temporary
 * and meta files are cleaned up and subsequent replication succeeds.
 */
@Test public void testReplicationError() throws Exception {
// Create a single-replica, single-block file to work with.
final Path fileName=new Path("/test.txt");
final int fileLen=1;
DFSTestUtil.createFile(fs,fileName,1,(short)1,1L);
DFSTestUtil.waitReplication(fs,fileName,(short)1);
LocatedBlocks blocks=NameNodeAdapter.getBlockLocations(cluster.getNameNode(),fileName.toString(),0,(long)fileLen);
assertEquals("Should only find 1 block",blocks.locatedBlockCount(),1);
LocatedBlock block=blocks.get(0);
// Bring up a second DataNode that will receive the (broken) transfer.
cluster.startDataNodes(conf,1,true,null,null);
cluster.waitActive();
final int sndNode=1;
DataNode datanode=cluster.getDataNodes().get(sndNode);
InetSocketAddress target=datanode.getXferAddress();
// Open a raw data-transfer connection, send a writeBlock request, then
// close without streaming any block data, so the receiving DN fails
// mid-write and is left with temporary/RBW state to clean up.
Socket s=new Socket(target.getAddress(),target.getPort());
DataOutputStream out=new DataOutputStream(s.getOutputStream());
DataChecksum checksum=DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,512);
new Sender(out).writeBlock(block.getBlock(),StorageType.DEFAULT,BlockTokenSecretManager.DUMMY_TOKEN,"",new DatanodeInfo[0],new StorageType[0],null,BlockConstructionStage.PIPELINE_SETUP_CREATE,1,0L,0L,0L,checksum,CachingStrategy.newDefaultStrategy());
out.flush();
out.close();
// Poll until the DN has removed the partial replica from both RBW dirs.
String bpid=cluster.getNamesystem().getBlockPoolId();
File storageDir=cluster.getInstanceStorageDir(sndNode,0);
File dir1=MiniDFSCluster.getRbwDir(storageDir,bpid);
storageDir=cluster.getInstanceStorageDir(sndNode,1);
File dir2=MiniDFSCluster.getRbwDir(storageDir,bpid);
while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
Thread.sleep(100);
}
// With the temp files gone, real replication to the new DN should work.
// NOTE(review): this waits for replication 1 rather than the requested 2;
// presumably it only ensures the request is accepted — confirm intent.
fs.setReplication(fileName,(short)2);
DFSTestUtil.waitReplication(fs,fileName,(short)1);
fs.delete(fileName,false);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to check that a DN goes down when all its volumes have failed.
 */
@Test public void testShutdown() throws Exception {
// The read-only chmod trick below does not work on Windows; skip there.
if (System.getProperty("os.name").startsWith("Windows")) {
return;
}
cluster.startDataNodes(conf,2,true,null,null);
cluster.waitActive();
final int dnIndex=0;
String bpid=cluster.getNamesystem().getBlockPoolId();
File storageDir=cluster.getInstanceStorageDir(dnIndex,0);
File dir1=MiniDFSCluster.getRbwDir(storageDir,bpid);
storageDir=cluster.getInstanceStorageDir(dnIndex,1);
File dir2=MiniDFSCluster.getRbwDir(storageDir,bpid);
try {
// Make both RBW dirs read-only so every write on this DN fails.
assertTrue("Couldn't chmod local vol",dir1.setReadOnly());
assertTrue("Couldn't chmod local vol",dir2.setReadOnly());
DataNode dn=cluster.getDataNodes().get(dnIndex);
// Keep writing files until the DN notices its failed volumes and shuts
// itself down: the loop exits when isDatanodeUp() becomes false.
for (int i=0; dn.isDatanodeUp(); i++) {
Path fileName=new Path("/test.txt" + i);
DFSTestUtil.createFile(fs,fileName,1024,(short)2,1L);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
fs.delete(fileName,true);
}
}
finally {
// Restore write permission so later tests and cleanup can use the dirs.
FileUtil.setWritable(dir1,true);
FileUtil.setWritable(dir2,true);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  // The expected permission comes straight from the configured default.
  final FsPermission expected =
      new FsPermission(conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY));
  final FileSystem localFS = FileSystem.getLocal(conf);
  // Every volume of every DataNode must carry that permission on disk.
  for (DataNode node : cluster.getDataNodes()) {
    for (FsVolumeSpi volume : node.getFSDataset().getVolumes()) {
      Path dataDir = new Path(volume.getBasePath());
      FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
      assertEquals("Permission for dir: " + dataDir + ", is " + actual
          + ", while expected is " + expected, expected, actual);
    }
  }
}
InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Checks whether {@link DataNode#checkDiskErrorAsync()} is being called or not.
 * Before refactoring the code the above function was not getting called
 * @throws IOException, InterruptedException
 */
@Test public void testcheckDiskError() throws IOException, InterruptedException {
  // Make sure at least one DataNode is running before asking it anything.
  if (cluster.getDataNodes().size() <= 0) {
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
  }
  final DataNode dn = cluster.getDataNodes().get(0);
  final long interval = dn.checkDiskErrorInterval;
  final long slack = interval / 2;
  // Kick off the async check, then give it one full interval to run.
  dn.checkDiskErrorAsync();
  Thread.sleep(interval);
  final long lastCheck = dn.getLastDiskErrorCheck();
  // The recorded check time must fall within interval + slack of now.
  assertTrue("Disk Error check is not performed within " + interval + " ms",
      Time.monotonicNow() - lastCheck < interval + slack);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Cache all blocks of a file while mlock is artificially slowed down, then
 * issue uncache commands before caching finishes; cache usage must still
 * return to zero.
 */
@Test(timeout=600000) public void testUncachingBlocksBeforeCachingFinishes() throws Exception {
  LOG.info("beginning testUncachingBlocksBeforeCachingFinishes");
  final int NUM_BLOCKS = 5;
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  final Path testFile = new Path("/testCacheBlock");
  final long testFileLen = BLOCK_SIZE * NUM_BLOCKS;
  // Fixed: the seed used a lowercase long suffix (0xABBAl), which is easily
  // misread as the digit 1; same value, uppercase L.
  DFSTestUtil.createFile(fs, testFile, testFileLen, (short) 1, 0xABBAL);
  HdfsBlockLocation[] locs = (HdfsBlockLocation[]) fs.getFileBlockLocations(testFile, 0, testFileLen);
  assertEquals("Unexpected number of blocks", NUM_BLOCKS, locs.length);
  final long[] blockSizes = getBlockSizes(locs);
  final long cacheCapacity = fsd.getCacheCapacity();
  long cacheUsed = fsd.getCacheUsed();
  long current = 0;
  assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
  assertEquals("Unexpected amount of cache used", current, cacheUsed);
  // Slow every mlock down so uncache commands arrive while caching is
  // still in flight.
  NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
    @Override public void mlock(String identifier, ByteBuffer mmap, long length) throws IOException {
      LOG.info("An mlock operation is starting on " + identifier);
      try {
        Thread.sleep(3000);
      } catch (InterruptedException e) {
        Assert.fail();
      }
    }
  });
  // Cache each block and track the expected usage as it grows.
  for (int i = 0; i < NUM_BLOCKS; i++) {
    setHeartbeatResponse(cacheBlock(locs[i]));
    current = DFSTestUtil.verifyExpectedCacheUsage(current + blockSizes[i], i + 1, fsd);
  }
  // Uncache everything before the slow mlocks can finish; usage must
  // drop back to zero.
  setHeartbeatResponse(new DatanodeCommand[]{getResponse(locs, DatanodeProtocol.DNA_UNCACHE)});
  current = DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
  LOG.info("finishing testUncachingBlocksBeforeCachingFinishes");
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Cache a file that exactly fills the DataNode cache, then add a directive
 * for a second small file: it must make no progress while the cache is
 * full, and must become cached once the big directive is removed.
 */
@Test(timeout=60000) public void testReCacheAfterUncache() throws Exception {
final int TOTAL_BLOCKS_PER_CACHE=Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
BlockReaderTestUtil.enableHdfsCachingTracing();
// The test assumes cache capacity is an exact multiple of the block size.
Assert.assertEquals(0,CACHE_CAPACITY % BLOCK_SIZE);
final Path SMALL_FILE=new Path("/smallFile");
DFSTestUtil.createFile(fs,SMALL_FILE,BLOCK_SIZE,(short)1,0xcafe);
// The big file's blocks exactly fill the cache.
final Path BIG_FILE=new Path("/bigFile");
DFSTestUtil.createFile(fs,BIG_FILE,TOTAL_BLOCKS_PER_CACHE * BLOCK_SIZE,(short)1,0xbeef);
final DistributedFileSystem dfs=cluster.getFileSystem();
dfs.addCachePool(new CachePoolInfo("pool"));
final long bigCacheDirectiveId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(BIG_FILE).setReplication((short)1).build());
// Wait (up to 30s) until every block of the big file is cached.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
MetricsRecordBuilder dnMetrics=getMetrics(dn.getMetrics().name());
long blocksCached=MetricsAsserts.getLongCounter("BlocksCached",dnMetrics);
if (blocksCached != TOTAL_BLOCKS_PER_CACHE) {
LOG.info("waiting for " + TOTAL_BLOCKS_PER_CACHE + " to "+ "be cached. Right now only "+ blocksCached+ " blocks are cached.");
return false;
}
LOG.info(TOTAL_BLOCKS_PER_CACHE + " blocks are now cached.");
return true;
}
}
,1000,30000);
// With the cache full, the small file's directive cannot make progress:
// after 10 seconds the cached-block count must be unchanged.
final long shortCacheDirectiveId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(SMALL_FILE).setReplication((short)1).build());
Thread.sleep(10000);
MetricsRecordBuilder dnMetrics=getMetrics(dn.getMetrics().name());
Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE,MetricsAsserts.getLongCounter("BlocksCached",dnMetrics));
// Free the cache; the small file's directive should now complete.
dfs.removeCacheDirective(bigCacheDirectiveId);
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
RemoteIterator iter;
try {
// Scan the directive listing for the small file's entry.
iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
CacheDirectiveEntry entry;
do {
entry=iter.next();
}
while (entry.getInfo().getId() != shortCacheDirectiveId);
if (entry.getStats().getFilesCached() != 1) {
LOG.info("waiting for directive " + shortCacheDirectiveId + " to be cached. stats = "+ entry.getStats());
return false;
}
LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
}
catch ( IOException e) {
Assert.fail("unexpected exception" + e.toString());
}
return true;
}
}
,1000,30000);
dfs.removeCacheDirective(shortCacheDirectiveId);
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verify that the NameNode can learn about new storages from incremental
 * block reports.
 * This tests the fix for the error condition seen in HDFS-6904.
 * @throws IOException
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testNnLearnsNewStorages() throws IOException, InterruptedException {
  // Report a dummy block against a storage the NameNode has never seen.
  final String storageUuid = UUID.randomUUID().toString();
  StorageReceivedDeletedBlocks[] reports =
      makeReportForReceivedBlock(getDummyBlock(), new DatanodeStorage(storageUuid));
  cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
  // The NN must now have a DatanodeStorageInfo for that storage on dn0.
  DatanodeStorageInfo info = cluster.getNameNode().getNamesystem().getBlockManager()
      .getDatanodeManager().getDatanode(dn0.getDatanodeId()).getStorageInfo(storageUuid);
  assertNotNull(info);
}
APIUtilityVerifier TestInitializer InternalCallVerifier ConditionMatcher HybridVerifier
/**
 * Setup a {@link MiniDFSCluster}.
 * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
 */
@Before public void setup() throws IOException, InterruptedException {
  conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  // Per-DataNode config overlays: only the RO node gets READ_ONLY_SHARED.
  Configuration[] overlays = new Configuration[NUM_DATANODES];
  for (int i = 0; i < overlays.length; i++) {
    overlays[i] = new Configuration();
    if (i == RO_NODE_INDEX) {
      // Inside this branch i == RO_NODE_INDEX always holds, so set the
      // state directly (the original's guarded ternary was dead code).
      overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, READ_ONLY_SHARED);
    }
  }
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).dataNodeConfOverlays(overlays).build();
  fs = cluster.getFileSystem();
  blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
  datanodeManager = blockManager.getDatanodeManager();
  client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), cluster.getConfiguration(0));
  // Sanity-check each DataNode reports the storage state it was given.
  for (int i = 0; i < NUM_DATANODES; i++) {
    DataNode dataNode = cluster.getDataNodes().get(i);
    validateStorageState(BlockManagerTestUtil.getStorageReportsForDatanode(datanodeManager.getDatanode(dataNode.getDatanodeId())), i == RO_NODE_INDEX ? READ_ONLY_SHARED : NORMAL);
  }
  // Create a single-block file with one NORMAL replica...
  DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE, (short) 1, seed);
  LocatedBlock locatedBlock = getLocatedBlock();
  extendedBlock = locatedBlock.getBlock();
  block = extendedBlock.getLocalBlock();
  assertThat(locatedBlock.getLocations().length, is(1));
  normalDataNode = locatedBlock.getLocations()[0];
  readOnlyDataNode = datanodeManager.getDatanode(cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
  assertThat(normalDataNode, is(not(readOnlyDataNode)));
  validateNumberReplicas(1);
  // ...then inject the same block into the read-only node so the block
  // ends up with both a NORMAL and a READ_ONLY_SHARED location.
  cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
  waitForLocations(2);
}
InternalCallVerifier ConditionMatcher
/**
 * Verify that corrupt READ_ONLY_SHARED replicas aren't counted
 * towards the corrupt replicas total.
 */
@Test public void testReadOnlyReplicaCorrupt() throws Exception {
  // Mark the replica on the read-only node as corrupt.
  LocatedBlock badBlock = new LocatedBlock(extendedBlock, new DatanodeInfo[]{readOnlyDataNode});
  client.reportBadBlocks(new LocatedBlock[]{badBlock});
  waitForLocations(1);
  // The corrupt read-only replica must not appear in the corrupt count.
  NumberReplicas counts = blockManager.countNodes(block);
  assertThat(counts.corruptReplicas(), is(0));
}
InternalCallVerifier ConditionMatcher
/**
 * Verify that the NameNode is able to still use READ_ONLY_SHARED replicas even
 * when the single NORMAL replica is offline (and the effective replication count is 0).
 */
@Test public void testNormalReplicaOffline() throws Exception {
// Take the lone NORMAL replica's DataNode down and make the NN notice.
cluster.stopDataNode(normalDataNode.getXferAddr());
BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(),normalDataNode.getXferAddr());
// No live replicas remain...
NumberReplicas numberReplicas=blockManager.countNodes(block);
assertThat(numberReplicas.liveReplicas(),is(0));
// ...so the block must be counted as under-replicated.
BlockManagerTestUtil.updateState(blockManager);
assertThat(blockManager.getUnderReplicatedBlocksCount(),is(1L));
// Trigger re-replication and wait for one live replica to come back;
// the READ_ONLY_SHARED copy is the only possible source.
BlockManagerTestUtil.computeAllPendingWork(blockManager);
DFSTestUtil.waitForReplication(cluster,extendedBlock,1,1,0);
// Back to one NORMAL replica plus the read-only location.
assertThat(getLocatedBlock().getLocations().length,is(2));
validateNumberReplicas(1);
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify a DataNode picks up dynamically added NameNodes: each addNameNode
 * call must create one more block-pool service, and the set of NN addresses
 * known to the DN must exactly match the cluster's.
 */
@Test public void testRefreshNamenodes() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(new NSConf("ns1").addNN(new NNConf(null).setIpcPort(nnPort1))).setFederation(true);
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).build();
    DataNode dn = cluster.getDataNodes().get(0);
    assertEquals(1, dn.getAllBpOs().length);
    // Each added NameNode should yield one more block-pool service on the DN.
    cluster.addNameNode(conf, nnPort2);
    assertEquals(2, dn.getAllBpOs().length);
    cluster.addNameNode(conf, nnPort3);
    assertEquals(3, dn.getAllBpOs().length);
    cluster.addNameNode(conf, nnPort4);
    // Parameterized sets (the originals were raw types); add() returning
    // true also asserts all addresses are distinct.
    Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
    for (int i = 0; i < 4; i++) {
      assertTrue(nnAddrsFromCluster.add(cluster.getNameNode(i).getNameNodeAddress()));
    }
    Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();
    for (BPOfferService bpos : dn.getAllBpOs()) {
      for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
        assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
      }
    }
    // Empty symmetric difference: the DN sees exactly the cluster's NNs.
    assertEquals("", Joiner.on(",").join(Sets.symmetricDifference(nnAddrsFromCluster, nnAddrsFromDN)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Populate the simulated dataset, then verify every block is valid,
 * has the expected length, and its data reads back correctly.
 */
@Test public void testWriteRead() throws IOException {
  final SimulatedFSDataset dataset = getSimulatedFSDataset();
  addSomeBlocks(dataset);
  for (int blockId = 1; blockId <= NUMBLOCKS; blockId++) {
    final ExtendedBlock block = new ExtendedBlock(bpid, blockId, 0, 0);
    assertTrue(dataset.isValidBlock(block));
    assertEquals(blockIdToLen(blockId), dataset.getLength(block));
    checkBlockDataAndSize(dataset, block, blockIdToLen(blockId));
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Invalidate two blocks and check they are gone, space accounting is
 * updated, and all remaining blocks stay valid.
 */
@Test public void testInvalidate() throws IOException {
  final SimulatedFSDataset dataset = getSimulatedFSDataset();
  final int bytesAdded = addSomeBlocks(dataset);
  // Invalidate the first two blocks.
  final Block[] doomed = {new Block(1, 0, 0), new Block(2, 0, 0)};
  dataset.invalidate(bpid, doomed);
  checkInvalidBlock(new ExtendedBlock(bpid, doomed[0]));
  checkInvalidBlock(new ExtendedBlock(bpid, doomed[1]));
  // Space accounting must reflect exactly the two deletions.
  final long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded - sizeDeleted, dataset.getDfsUsed());
  assertEquals(dataset.getCapacity() - bytesAdded + sizeDeleted, dataset.getRemaining());
  // Every other block must remain valid.
  for (int blockId = 3; blockId <= NUMBLOCKS; blockId++) {
    assertTrue(dataset.isValidBlock(new ExtendedBlock(bpid, new Block(blockId, 0, 0))));
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The block report of an empty dataset is empty; after adding blocks it
 * lists each block with its expected length.
 */
@Test public void testGetBlockReport() throws IOException {
  final SimulatedFSDataset dataset = getSimulatedFSDataset();
  BlockListAsLongs report = dataset.getBlockReport(bpid);
  assertEquals(0, report.getNumberOfBlocks());
  addSomeBlocks(dataset);
  report = dataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, report.getNumberOfBlocks());
  for (Block reported : report) {
    assertNotNull(reported);
    assertEquals(blockIdToLen(reported.getBlockId()), reported.getNumBytes());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Requesting metadata for a nonexistent block must throw IOException;
 * for an existing block the metadata header must carry the expected
 * version and a NULL checksum (simulated data has no real checksums).
 */
@Test public void testGetMetaData() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  ExtendedBlock b = new ExtendedBlock(bpid, 1, 5, 0);
  try {
    // No blocks exist yet; this call is expected to throw before the assert.
    assertTrue(fsdataset.getMetaDataInputStream(b) == null);
    fail("Expected an IO exception");  // was assertTrue("...", false)
  } catch (IOException e) {
    // expected
  }
  addSomeBlocks(fsdataset);
  b = new ExtendedBlock(bpid, 1, 0, 0);
  InputStream metaInput = fsdataset.getMetaDataInputStream(b);
  DataInputStream metaDataInput = new DataInputStream(metaInput);
  try {
    // Validate the metadata header: version, then a NULL checksum of size 0.
    short version = metaDataInput.readShort();
    assertEquals(BlockMetadataHeader.VERSION, version);
    DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
    assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
    assertEquals(0, checksum.getChecksumSize());
  } finally {
    // Close the stream (the original leaked it).
    metaDataInput.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Inject a block report into a dataset already holding other blocks: the
 * result must be the union of both sets with correct lengths and space
 * accounting, and injection must fail with IOException when the target's
 * capacity is too small.
 */
@Test public void testInjectionNonEmpty() throws IOException {
  SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(0, blockReport.getNumberOfBlocks());
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
  fsdataset = null;
  // Second dataset holding a disjoint id range (NUMBLOCKS+1 onwards).
  SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
  bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS + 1);
  // The captured report is a snapshot: asking the new dataset for its own
  // report must not change the snapshot's contents.
  sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  // Inject the first dataset's blocks; the union is now 2 * NUMBLOCKS.
  sfsdataset.injectBlocks(bpid, blockReport);
  blockReport = sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS * 2, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
  }
  assertEquals(bytesAdded, sfsdataset.getDfsUsed());
  assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
  // Injecting into a dataset with too little capacity must fail.
  conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
  try {
    sfsdataset = getSimulatedFSDataset();
    sfsdataset.addBlockPool(bpid, conf);
    sfsdataset.injectBlocks(bpid, blockReport);
    fail("Expected an IO exception");  // was assertTrue("Expected an IO exception", false)
  } catch (IOException e) {
    // expected: insufficient capacity
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Inject a populated block report into an empty dataset: it must
 * reproduce the same blocks, lengths, and space accounting.
 */
@Test public void testInjectionEmpty() throws IOException {
  // Fill one simulated dataset and capture its block report.
  final SimulatedFSDataset source = getSimulatedFSDataset();
  BlockListAsLongs report = source.getBlockReport(bpid);
  assertEquals(0, report.getNumberOfBlocks());
  final int bytesAdded = addSomeBlocks(source);
  report = source.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, report.getNumberOfBlocks());
  for (Block reported : report) {
    assertNotNull(reported);
    assertEquals(blockIdToLen(reported.getBlockId()), reported.getNumBytes());
  }
  // Inject that report into a fresh, empty dataset.
  final SimulatedFSDataset target = getSimulatedFSDataset();
  target.injectBlocks(bpid, report);
  report = target.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, report.getNumberOfBlocks());
  for (Block reported : report) {
    assertNotNull(reported);
    assertEquals(blockIdToLen(reported.getBlockId()), reported.getNumBytes());
    assertEquals(blockIdToLen(reported.getBlockId()), target.getLength(new ExtendedBlock(bpid, reported)));
  }
  // Space accounting must match what was originally added.
  assertEquals(bytesAdded, target.getDfsUsed());
  assertEquals(target.getCapacity() - bytesAdded, target.getRemaining());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify dfsUsed/remaining accounting on the simulated dataset before and
 * after adding blocks.
 */
@Test public void testStorageUsage() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  // JUnit convention is expected-first; the first two assertions had the
  // arguments reversed, which garbles the failure message.
  assertEquals(0, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity(), fsdataset.getRemaining());
  int bytesAdded = addSomeBlocks(fsdataset);
  assertEquals(bytesAdded, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded, fsdataset.getRemaining());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Start a Kerberos-secured cluster using principals/keytabs supplied via
 * system properties, and verify the DataNode comes up. All required
 * properties must be set or the test fails fast.
 */
@Test public void testSecureNameNode() throws Exception {
MiniDFSCluster cluster=null;
try {
// Security settings come from system properties set by the test harness.
String nnPrincipal=System.getProperty("dfs.namenode.kerberos.principal");
String nnSpnegoPrincipal=System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
String nnKeyTab=System.getProperty("dfs.namenode.keytab.file");
assertNotNull("NameNode principal was not specified",nnPrincipal);
assertNotNull("NameNode SPNEGO principal was not specified",nnSpnegoPrincipal);
assertNotNull("NameNode keytab was not specified",nnKeyTab);
String dnPrincipal=System.getProperty("dfs.datanode.kerberos.principal");
String dnKeyTab=System.getProperty("dfs.datanode.keytab.file");
assertNotNull("DataNode principal was not specified",dnPrincipal);
assertNotNull("DataNode keytab was not specified",dnKeyTab);
Configuration conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,nnPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,nnSpnegoPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,nnKeyTab);
conf.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,dnPrincipal);
conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,dnKeyTab);
// Secure DataNodes bind privileged ports (< 1024).
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,"127.0.0.1:1004");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,"127.0.0.1:1006");
// NOTE(review): "700" looks like a permission string, not a directory —
// presumably DFS_DATANODE_DATA_DIR_PERMISSION_KEY was intended; confirm.
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,"700");
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).checkDataNodeAddrConfig(true).build();
cluster.waitActive();
assertTrue(cluster.isDataNodeUp());
}
catch ( Exception ex) {
// Log before rethrowing so the cause appears even if JUnit output is terse.
ex.printStackTrace();
throw ex;
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier IdentityVerifier ConditionMatcher HybridVerifier
/**
 * Ensure that storage type and storage state are propagated
 * in Storage Reports.
 */
@Test public void testStorageReportHasStorageTypeAndState() throws IOException {
  // This test runs with a non-default storage type configured.
  assertNotSame(storageType, StorageType.DEFAULT);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);
  // Spy on the DN->NN protocol and force a heartbeat so sendHeartbeat fires.
  DatanodeProtocolClientSideTranslatorPB nnSpy = DataNodeTestUtils.spyOnBposToNN(dn, nn);
  DataNodeTestUtils.triggerHeartbeat(dn);
  // Parameterized captor (the original used a raw ArgumentCaptor).
  ArgumentCaptor<StorageReport[]> captor = ArgumentCaptor.forClass(StorageReport[].class);
  Mockito.verify(nnSpy).sendHeartbeat(any(DatanodeRegistration.class), captor.capture(), anyLong(), anyLong(), anyInt(), anyInt(), anyInt());
  // Every reported storage must carry the configured type and NORMAL state.
  for (StorageReport report : captor.getValue()) {
    assertThat(report.getStorage().getStorageType(), is(storageType));
    assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Write a file without closing it (leaving a replica-being-written),
 * transfer the RBW replica to a freshly started DataNode, and verify the
 * copy matches the original's id, generation stamp and visible length.
 */
@Test public void testTransferRbw() throws Exception {
final HdfsConfiguration conf=new HdfsConfiguration();
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
try {
cluster.waitActive();
final DistributedFileSystem fs=cluster.getFileSystem();
final Path p=new Path("/foo");
// Random size in [64KiB, 128KiB).
final int size=(1 << 16) + RAN.nextInt(1 << 16);
LOG.info("size = " + size);
// Write with hflush but never close, so the last block stays RBW.
final FSDataOutputStream out=fs.create(p,REPLICATION);
final byte[] bytes=new byte[1024];
for (int remaining=size; remaining > 0; ) {
RAN.nextBytes(bytes);
final int len=bytes.length < remaining ? bytes.length : remaining;
out.write(bytes,0,len);
out.hflush();
remaining-=len;
}
final ReplicaBeingWritten oldrbw;
final DataNode newnode;
final DatanodeInfo newnodeinfo;
final String bpid=cluster.getNamesystem().getBlockPoolId();
{
final DataNode oldnode=cluster.getDataNodes().get(0);
oldrbw=getRbw(oldnode,bpid);
LOG.info("oldrbw = " + oldrbw);
// Start the DataNode that will receive the transferred replica.
cluster.startDataNodes(conf,1,true,null,null);
newnode=cluster.getDataNodes().get(REPLICATION);
final DatanodeInfo oldnodeinfo;
{
final DatanodeInfo[] datatnodeinfos=cluster.getNameNodeRpc().getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertEquals(2,datatnodeinfos.length);
// Find which report entry is the new node; the other is the old one.
int i=0;
for (DatanodeRegistration dnReg=newnode.getDNRegistrationForBP(bpid); i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++) ;
Assert.assertTrue(i < datatnodeinfos.length);
newnodeinfo=datatnodeinfos[i];
oldnodeinfo=datatnodeinfos[1 - i];
}
// Transfer the RBW — sized by the acked bytes — to the new DataNode.
final ExtendedBlock b=new ExtendedBlock(bpid,oldrbw.getBlockId(),oldrbw.getBytesAcked(),oldrbw.getGenerationStamp());
final BlockOpResponseProto s=DFSTestUtil.transferRbw(b,DFSClientAdapter.getDFSClient(fs),oldnodeinfo,newnodeinfo);
Assert.assertEquals(Status.SUCCESS,s.getStatus());
}
// The transferred replica must match the original RBW's identity and
// visible length.
final ReplicaBeingWritten newrbw=getRbw(newnode,bpid);
LOG.info("newrbw = " + newrbw);
Assert.assertEquals(oldrbw.getBlockId(),newrbw.getBlockId());
Assert.assertEquals(oldrbw.getGenerationStamp(),newrbw.getGenerationStamp());
Assert.assertEquals(oldrbw.getVisibleLength(),newrbw.getVisibleLength());
LOG.info("DONE");
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Three volumes, one low on space: with balanced-space-preference 1.0 the
 * policy must round-robin over the two roomy volumes; with 0.0 it must
 * always pick the volume with the least free space.
 */
@Test(timeout=60000) public void testThreeUnbalancedVolumes() throws Exception {
  @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
      ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
  // Parameterized list (the original used raw types).
  List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
  // Volume 0: 1MB free; volumes 1 and 2: 3MB free each.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
  // Preference 1.0: alternate between the two high-space volumes.
  initPolicy(policy, 1.0f);
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  // Preference 0.0: always the low-space volume.
  initPolicy(policy, 0.0f);
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
}
InternalCallVerifier EqualityVerifier
/**
 * Two volumes whose free space is unbalanced: with balanced-space
 * preference 1.0 the policy must always choose the roomier volume.
 */
@Test(timeout=60000) public void testTwoUnbalancedVolumes() throws Exception {
  @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
      ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
  initPolicy(policy, 1.0f);
  // Parameterized list (the original used raw types).
  List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
  // Volume 0: 1MB free; volume 1: 3MB free.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3);
  // The roomier volume must win every time.
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
}
InternalCallVerifier EqualityVerifier
/**
 * Four volumes, two low on space: with balanced-space-preference 1.0 the
 * policy round-robins over the two roomy volumes; with 0.0 it round-robins
 * over the two low-space ones.
 */
@Test(timeout=60000) public void testFourUnbalancedVolumes() throws Exception {
  @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
      ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
  // Parameterized list (the original used raw types).
  List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
  // Volumes 0 and 1: about 1MB free; volumes 2 and 3: 3MB free each.
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L);
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L + 1);
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3);
  volumes.add(Mockito.mock(FsVolumeSpi.class));
  Mockito.when(volumes.get(3).getAvailable()).thenReturn(1024L * 1024L * 3);
  // Preference 1.0: alternate between the two high-space volumes.
  initPolicy(policy, 1.0f);
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(2), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(3), policy.chooseVolume(volumes, 100));
  // Preference 0.0: alternate between the two low-space volumes.
  initPolicy(policy, 0.0f);
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(0), policy.chooseVolume(volumes, 100));
  Assert.assertEquals(volumes.get(1), policy.chooseVolume(volumes, 100));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Creates four single-replica files, plants stale unlink tmp files next to
 * three of the replicas (covering block-file/meta-file combinations), then
 * restarts the datanode and verifies every replica comes back FINALIZED.
 */
@Test public void testRecoverReplicas() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  try {
    FileSystem fs = cluster.getFileSystem();
    for (int i = 0; i < 4; i++) {
      Path fileName = new Path("/test" + i);
      DFSTestUtil.createFile(fs, fileName, 1, (short) 1, 0L);
      DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    }
    String bpid = cluster.getNamesystem().getBlockPoolId();
    DataNode dn = cluster.getDataNodes().get(0);
    // Parameterized iterator so next() yields ReplicaInfo directly; the
    // raw Iterator made the assignments below uncompilable.
    Iterator<ReplicaInfo> replicasItor =
        dataset(dn).volumeMap.replicas(bpid).iterator();
    // Cover the block-file/meta-file unlink tmp combinations.
    ReplicaInfo replica = replicasItor.next();
    createUnlinkTmpFile(replica, true, true);
    createUnlinkTmpFile(replica, false, true);
    replica = replicasItor.next();
    createUnlinkTmpFile(replica, true, false);
    createUnlinkTmpFile(replica, false, false);
    replica = replicasItor.next();
    createUnlinkTmpFile(replica, true, true);
    createUnlinkTmpFile(replica, false, false);
    // Restart: recovery must discard the tmp copies and finalize replicas.
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);
    Collection<ReplicaInfo> replicas = dataset(dn).volumeMap.replicas(bpid);
    Assert.assertEquals(4, replicas.size());
    for (ReplicaInfo r : replicas) {
      Assert.assertEquals(ReplicaState.FINALIZED, r.getState());
    }
  } finally {
    cluster.shutdown();
  }
}
TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Builds an FsDatasetImpl over a mocked DataNode/DataStorage pair with
 * NUM_INIT_VOLUMES storage directories, then sanity-checks that it starts
 * with the expected volume count and zero failed volumes.
 */
@Before public void setUp() throws IOException {
final DataNode datanode=Mockito.mock(DataNode.class);
storage=Mockito.mock(DataStorage.class);
Configuration conf=new Configuration();
final DNConf dnConf=new DNConf(conf);
// The dataset constructor pulls both the raw conf and the DNConf off the
// DataNode mock, so both stubs are required before construction.
when(datanode.getConf()).thenReturn(conf);
when(datanode.getDnConf()).thenReturn(dnConf);
// Storage directories must be stubbed before constructing the dataset,
// which enumerates them from DataStorage during initialization.
createStorageDirs(storage,conf,NUM_INIT_VOLUMES);
dataset=new FsDatasetImpl(datanode,storage,conf);
assertEquals(NUM_INIT_VOLUMES,dataset.getVolumes().size());
assertEquals(0,dataset.getNumFailedVolumes());
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Adds three new storage locations to the dataset and verifies both the
 * resulting volume count and that each added volume reports the expected
 * base path, in order, after the pre-existing volumes.
 */
@Test public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  // Parameterized list so get(i).getFile() resolves below; the raw List
  // made get(i) return Object, which does not compile.
  List<StorageLocation> newLocations = new ArrayList<StorageLocation>();
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    newLocations.add(StorageLocation.parse(path));
    // New directories slot in after the existing ones.
    when(storage.getStorageDir(numExistingVolumes + i))
        .thenReturn(new Storage.StorageDirectory(new File(path)));
  }
  when(storage.getNumStorageDirs()).thenReturn(totalVolumes);
  dataset.addVolumes(newLocations);
  assertEquals(totalVolumes, dataset.getVolumes().size());
  for (int i = 0; i < numNewVolumes; i++) {
    assertEquals(newLocations.get(i).getFile().getPath(),
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test for{@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)}
*/
@Test public void testUpdateReplicaUnderRecovery() throws IOException {
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String bpid=cluster.getNamesystem().getBlockPoolId();
DistributedFileSystem dfs=cluster.getFileSystem();
String filestr="/foo";
Path filepath=new Path(filestr);
DFSTestUtil.createFile(dfs,filepath,1024L,(short)3,0L);
final LocatedBlock locatedblock=getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),filestr);
final DatanodeInfo[] datanodeinfo=locatedblock.getLocations();
Assert.assertTrue(datanodeinfo.length > 0);
final DataNode datanode=cluster.getDataNode(datanodeinfo[0].getIpcPort());
Assert.assertTrue(datanode != null);
final ExtendedBlock b=locatedblock.getBlock();
final long recoveryid=b.getGenerationStamp() + 1;
final long newlength=b.getNumBytes() - 1;
final FsDatasetSpi> fsdataset=DataNodeTestUtils.getFSDataset(datanode);
final ReplicaRecoveryInfo rri=fsdataset.initReplicaRecovery(new RecoveringBlock(b,null,recoveryid));
final ReplicaInfo replica=FsDatasetTestUtil.fetchReplicaInfo(fsdataset,bpid,b.getBlockId());
Assert.assertEquals(ReplicaState.RUR,replica.getState());
FsDatasetImpl.checkReplicaFiles(replica);
{
final ExtendedBlock tmp=new ExtendedBlock(b.getBlockPoolId(),rri.getBlockId(),rri.getNumBytes() - 1,rri.getGenerationStamp());
try {
fsdataset.updateReplicaUnderRecovery(tmp,recoveryid,newlength);
Assert.fail();
}
catch ( IOException ioe) {
System.out.println("GOOD: getting " + ioe);
}
}
final String storageID=fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(),rri),recoveryid,newlength);
assertTrue(storageID != null);
}
finally {
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link FsDatasetImpl#initReplicaRecovery(String,ReplicaMap,Block,long,long)}:
 * normal recovery, re-recovery with a newer id, rejection of a stale
 * recovery id, null for an unknown block, and failure when the block's
 * generation stamp is ahead of the replica's.
 */
@Test public void testInitReplicaRecovery() throws IOException {
  final long firstblockid = 10000L;
  final long gs = 7777L;
  final long length = 22L;
  final ReplicaMap map = new ReplicaMap(this);
  String bpid = "BP-TEST";
  final Block[] blocks = new Block[5];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new Block(firstblockid + i, length, gs);
    map.add(bpid, createReplicaInfo(blocks[i]));
  }
  {
    // Normal case: recovery converts the replica to RUR and records the id.
    final Block b = blocks[0];
    final ReplicaInfo originalInfo = map.get(bpid, b);
    final long recoveryid = gs + 1;
    final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo);
    final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
    Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
    // A second recovery with a newer id supersedes the first.
    final long recoveryid2 = gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid2,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo2);
    final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
    Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
    // Retrying with the now-stale recovery id must fail.
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }
  {
    // Unknown block: recovery must return null.
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid - 1, length, gs);
    ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
        recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    Assert.assertNull("Data-node should not have this replica.", r);
  }
  {
    // Recovery id not newer than the replica's generation stamp: must throw.
    final long recoveryid = gs - 1;
    final Block b = new Block(firstblockid + 1, length, gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }
  {
    // Block's generation stamp ahead of the replica's: must throw with a
    // diagnostic message.
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid, length, gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's " + "gs is less than the block's gs");
    } catch (IOException e) {
      // Previously the startsWith() result was silently discarded, so this
      // branch asserted nothing; actually verify the failure message.
      Assert.assertTrue(e.getMessage(), e.getMessage().startsWith(
          "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Exercises ReplicaMap.remove for a null block (must throw), mismatched
 * generation stamp / block id (must miss), and exact matches by Block and
 * by block id (must hit). Statement order is significant: successful
 * removes consume the entry, which is re-added before the id-based remove.
 */
@Test public void testRemove(){
try {
// null block argument is rejected up front.
map.remove(bpid,null);
fail("Expected exception not thrown");
}
 catch ( IllegalArgumentException expected) {
}
// Same id but wrong generation stamp: no entry removed.
Block b=new Block(block);
b.setGenerationStamp(0);
assertNull(map.remove(bpid,b));
// Right generation stamp but wrong block id: no entry removed.
b.setGenerationStamp(block.getGenerationStamp());
b.setBlockId(0);
assertNull(map.remove(bpid,b));
// Exact match removes the replica...
assertNotNull(map.remove(bpid,block));
// ...and removing a non-existent id returns null.
assertNull(map.remove(bpid,0));
// Re-add, then remove by block id alone.
map.add(bpid,new FinalizedReplica(block,null,null));
assertNotNull(map.remove(bpid,block.getBlockId()));
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Test for ReplicasMap.get(Block) and ReplicasMap.get(long): a null block
 * is rejected, lookups require both block id and generation stamp to match,
 * and id-based lookup matches on id alone.
 */
@Test public void testGet(){
try {
// null block argument is rejected up front.
map.get(bpid,null);
fail("Expected exception not thrown");
}
 catch ( IllegalArgumentException expected) {
}
// Exact match hits.
assertNotNull(map.get(bpid,block));
// Same id but wrong generation stamp: miss.
Block b=new Block(block);
b.setGenerationStamp(0);
assertNull(map.get(bpid,b));
// Right generation stamp but wrong block id: miss.
b.setGenerationStamp(block.getGenerationStamp());
b.setBlockId(0);
assertNull(map.get(bpid,b));
// Lookup by block id alone hits; unknown id misses.
assertNotNull(map.get(bpid,block.getBlockId()));
assertNull(map.get(bpid,0));
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that DatanodeWebHdfsMethods.deserializeToken, when handed an HA
 * logical URI name, produces a token bound to that logical URI.
 */
@Test public void testDeserializeHAToken() throws IOException {
Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
// The servlet pulls its DataNode (and thus the HA conf) out of the
// ServletContext attribute "datanode", so both mocks are wired here.
DataNode dn=mock(DataNode.class);
doReturn(conf).when(dn).getConf();
ServletContext context=mock(ServletContext.class);
doReturn(dn).when(context).getAttribute("datanode");
// NOTE(review): Token is used raw here — the type parameter appears to
// have been stripped in extraction; confirm the intended identifier type.
final Token token=new Token();
DatanodeWebHdfsMethods method=new DatanodeWebHdfsMethods();
// Inject the mocked context into the servlet's private field.
Whitebox.setInternalState(method,"context",context);
final Token tok2=method.deserializeToken(token.encodeToUrlString(),LOGICAL_NAME);
Assert.assertTrue(HAUtil.isTokenForLogicalUri(tok2));
}
InternalCallVerifier EqualityVerifier
/**
 * Removing one named-user access entry leaves the other extended access
 * entries (and the ACL bit) intact.
 */
@Test public void testRemoveAclEntriesOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, USER, "bar", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ_WRITE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  spec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo"));
  fs.removeAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "bar", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ_WRITE) }, actual);
  assertPermission((short) 010760);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * removeDefaultAcl on a directory whose only extended entries are default
 * entries strips the ACL entirely.
 */
@Test public void testRemoveDefaultAclOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  fs.removeDefaultAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0750);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * removeAcl drops both access and default extended entries and restores the
 * directory's plain permission bits.
 */
@Test public void testRemoveAcl() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  fs.removeAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0750);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * setAcl with only a default named-user entry synthesizes the remaining
 * default entries (owner, group, mask, other) from the permission bits.
 */
@Test public void testSetAclOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * modifyAclEntries with a minimal set of default entries (owner, group,
 * other only) stores exactly those entries, with no default mask.
 */
@Test public void testModifyAclEntriesMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE));
  fs.modifyAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * Replacing an extended ACL with a minimal one (owner/group/other only)
 * drops the ACL feature and collapses back to plain permission bits.
 */
@Test public void testSetAclMinimal() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0644));
  // First install an extended ACL...
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  // ...then overwrite it with a minimal one.
  spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0640);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * A file created with an explicit mode under a directory carrying a default
 * ACL inherits the default entries, intersected with the requested mode.
 */
@Test public void testDefaultAclNewFileWithMode() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0755));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  Path filePath = new Path(path, "file1");
  int bufferSize = cluster.getConfiguration(0).getInt(
      CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
      CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
  // Create with an explicit 0740 mode rather than the fs default.
  fs.create(filePath, new FsPermission((short) 0740), false, bufferSize,
      fs.getDefaultReplication(filePath), fs.getDefaultBlockSize(path), null)
      .close();
  AclStatus status = fs.getAclStatus(filePath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) }, actual);
  assertPermission(filePath, (short) 010740);
  assertAclFeature(filePath, true);
}
InternalCallVerifier EqualityVerifier
/**
 * removeAcl on a directory whose extended entries are only defaults clears
 * the ACL and keeps the plain permission bits.
 */
@Test public void testRemoveAclOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  fs.removeAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0750);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * Removing the named-user entry and the mask reduces the ACL to a minimal
 * one, which drops the ACL feature entirely.
 */
@Test public void testRemoveAclEntriesMinimal() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0760));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_WRITE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(ACCESS, MASK));
  fs.removeAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0760);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * A minimal default ACL (owner/group/other only) on the parent yields a new
 * file with no extended entries and no ACL feature.
 */
@Test public void testDefaultMinimalAclNewFile() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE));
  fs.setAcl(path, spec);
  Path filePath = new Path(path, "file1");
  fs.create(filePath).close();
  AclStatus status = fs.getAclStatus(filePath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission(filePath, (short) 0640);
  assertAclFeature(filePath, false);
}
InternalCallVerifier EqualityVerifier
/**
 * removeAcl on a file that never had extended entries is a harmless no-op:
 * permissions are unchanged and no ACL feature appears.
 */
@Test public void testRemoveAclMinimalAcl() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  fs.removeAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0640);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * An explicitly supplied mask entry is honored verbatim (here ALL, shown in
 * the group slot of the reported permission) instead of being recomputed.
 */
@Test public void testSetAclCustomMask() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ) }, actual);
  assertPermission((short) 010670);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * setPermission after setAcl updates only the permission bits; the extended
 * access and default entries survive unchanged.
 */
@Test public void testSetPermission() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  fs.setPermission(path, FsPermission.createImmutable((short) 0700));
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 010700);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * setAcl preserves the sticky bit of the directory's mode while adding the
 * extended access and default entries.
 */
@Test public void testSetAclStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 011770);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * modifyAclEntries on a file with a minimal ACL adds the named-user entry
 * and synthesizes the mask from the existing group bits.
 */
@Test public void testModifyAclEntriesMinimal() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec =
      Lists.newArrayList(aclEntry(ACCESS, USER, "foo", READ_WRITE));
  fs.modifyAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ) }, actual);
  assertPermission((short) 010660);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * removeAcl strips all extended entries but leaves the sticky bit (and the
 * rest of the plain mode) untouched.
 */
@Test public void testRemoveAclStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  fs.removeAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 01750);
  assertAclFeature(false);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The ACL bit in FsPermission is managed internally: a caller-supplied
 * FsAclPermission must not cause the inode to grow an ACL feature, and the
 * stored mode must report 0755 in both short and extended forms.
 */
@Test public void testSetPermissionCannotSetAclBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setPermission(path, FsPermission.createImmutable((short) 0700));
  assertPermission((short) 0700);
  // Attempt to smuggle the ACL bit in via FsAclPermission.
  fs.setPermission(path,
      new FsAclPermission(FsPermission.createImmutable((short) 0755)));
  INode inode = cluster.getNamesystem().getFSDirectory()
      .getNode(path.toUri().getPath(), false);
  assertNotNull(inode);
  FsPermission perm = inode.getFsPermission();
  assertNotNull(perm);
  assertEquals(0755, perm.toShort());
  assertEquals(0755, perm.toExtendedShort());
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * Creating a symlink with createParent=true makes an intermediate directory
 * that inherits the parent's default ACL; the symlink itself and its target
 * file remain ACL-free.
 */
@Test public void testDefaultAclNewSymlinkIntermediate() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  Path filePath = new Path(path, "file1");
  fs.create(filePath).close();
  fs.setPermission(filePath, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  Path dirPath = new Path(path, "dir1");
  Path linkPath = new Path(dirPath, "link1");
  // createParent=true forces creation of dir1 under the ACL'd parent.
  fs.createSymlink(filePath, linkPath, true);
  AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) };
  AclStatus status = fs.getAclStatus(dirPath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(dirPath, (short) 010750);
  assertAclFeature(dirPath, true);
  // Neither the link nor the target picks up any ACL.
  expected = new AclEntry[] {};
  status = fs.getAclStatus(linkPath);
  actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(linkPath, (short) 0640);
  assertAclFeature(linkPath, false);
  status = fs.getAclStatus(filePath);
  actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(filePath, (short) 0640);
  assertAclFeature(filePath, false);
}
InternalCallVerifier EqualityVerifier
/**
 * Removing the named-user entry from both scopes leaves the remaining
 * access/default entries with a recomputed default mask.
 */
@Test public void testRemoveAclEntries() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(DEFAULT, USER, "foo"));
  fs.removeAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * setAcl with mixed access and default entries stores the extended entries
 * and synthesizes the missing default owner/group/mask/other entries.
 */
@Test public void testSetAcl() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 010770);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * Removing one default named-user entry keeps the other default entries and
 * recomputes the default mask from what remains.
 */
@Test public void testRemoveAclEntriesOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, USER, "bar", READ_EXECUTE));
  fs.setAcl(path, spec);
  spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo"));
  fs.removeAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bar", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * A directory renamed INTO a directory that carries a default ACL does not
 * retroactively inherit that ACL — inheritance happens only at creation.
 */
@Test public void testDefaultAclRenamedDir() throws Exception {
  Path dirPath = new Path(path, "dir");
  FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(dirPath, spec);
  Path subdirPath = new Path(path, "subdir");
  FileSystem.mkdirs(fs, subdirPath, FsPermission.createImmutable((short) 0750));
  Path renamedSubdirPath = new Path(dirPath, "subdir");
  fs.rename(subdirPath, renamedSubdirPath);
  AclEntry[] expected = new AclEntry[] {};
  AclStatus status = fs.getAclStatus(renamedSubdirPath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(renamedSubdirPath, (short) 0750);
  assertAclFeature(renamedSubdirPath, false);
}
InternalCallVerifier EqualityVerifier
/**
 * modifyAclEntries updates the named-user entries in both scopes and
 * recomputes the masks, leaving the sticky bit in place.
 */
@Test public void testModifyAclEntriesStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission((short) 011750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * A new file under a directory with a default ACL inherits the default
 * entries as access entries (no defaults on files).
 */
@Test public void testDefaultAclNewFile() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  Path filePath = new Path(path, "file1");
  fs.create(filePath).close();
  AclStatus status = fs.getAclStatus(filePath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) }, actual);
  assertPermission(filePath, (short) 010640);
  assertAclFeature(filePath, true);
}
InternalCallVerifier EqualityVerifier
/**
 * removeDefaultAcl on a file with only access entries leaves those entries
 * (and the ACL bit) untouched.
 */
@Test public void testRemoveDefaultAclOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  fs.removeDefaultAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) }, actual);
  assertPermission((short) 010770);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * removeDefaultAcl on a directory with no ACL at all is a harmless no-op.
 */
@Test public void testRemoveDefaultAclMinimal() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.removeDefaultAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, actual);
  assertPermission((short) 0750);
  assertAclFeature(false);
}
InternalCallVerifier EqualityVerifier
/**
 * mkdirs creating intermediate directories propagates the parent's default
 * ACL to every directory level it creates.
 */
@Test public void testDefaultAclNewDirIntermediate() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  Path dirPath = new Path(path, "dir1");
  Path subdirPath = new Path(dirPath, "subdir1");
  // Creates both dir1 and subdir1 in one call.
  fs.mkdirs(subdirPath);
  AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) };
  AclStatus status = fs.getAclStatus(dirPath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(dirPath, (short) 010750);
  assertAclFeature(dirPath, true);
  // The leaf directory inherits the same entries.
  status = fs.getAclStatus(subdirPath);
  actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(subdirPath, (short) 010750);
  assertAclFeature(subdirPath, true);
}
InternalCallVerifier EqualityVerifier
/**
 * setPermission on a file with only extended access entries changes the
 * permission bits without disturbing the extended entries.
 */
@Test public void testSetPermissionOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  fs.setPermission(path, FsPermission.createImmutable((short) 0600));
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ) }, actual);
  assertPermission((short) 010600);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * A file renamed INTO a directory carrying a default ACL does not inherit
 * that ACL — inheritance applies only at creation time.
 */
@Test public void testDefaultAclRenamedFile() throws Exception {
  Path dirPath = new Path(path, "dir");
  FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(dirPath, spec);
  Path filePath = new Path(path, "file1");
  fs.create(filePath).close();
  fs.setPermission(filePath, FsPermission.createImmutable((short) 0640));
  Path renamedFilePath = new Path(dirPath, "file1");
  fs.rename(filePath, renamedFilePath);
  AclEntry[] expected = new AclEntry[] {};
  AclStatus status = fs.getAclStatus(renamedFilePath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(renamedFilePath, (short) 0640);
  assertAclFeature(renamedFilePath, false);
}
InternalCallVerifier EqualityVerifier
/**
 * modifyAclEntries replaces just the matching named-user access entry and
 * recomputes the mask; other entries are untouched.
 */
@Test public void testModifyAclEntriesOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List<AclEntry> spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, spec);
  spec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, spec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) }, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/**
 * A new subdirectory inherits the parent's default ACL as both its access
 * entries and its own default entries.
 */
@Test public void testDefaultAclNewDir() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List<AclEntry> spec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, spec);
  Path dirPath = new Path(path, "dir1");
  fs.mkdirs(dirPath);
  AclStatus status = fs.getAclStatus(dirPath);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, actual);
  assertPermission(dirPath, (short) 010750);
  assertAclFeature(dirPath, true);
}
InternalCallVerifier EqualityVerifier
/** Access-only ACL entries on the parent are not inherited by a newly created file. */
@Test
public void testOnlyAccessAclNewFile() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", ALL));
  fs.modifyAclEntries(path, aclSpec);
  Path filePath = new Path(path, "file1");
  fs.create(filePath).close();
  AclStatus s = fs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  // No default ACL on the parent, so the file carries no extended entries.
  assertArrayEquals(new AclEntry[] {}, returned);
  assertPermission(filePath, (short) 0644);
  assertAclFeature(filePath, false);
}
InternalCallVerifier EqualityVerifier
/** Removing named-user and mask entries can leave a minimal (3-entry) default ACL and no extended access ACL. */
@Test
public void testRemoveAclEntriesMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(ACCESS, MASK),
      aclEntry(DEFAULT, USER, "foo"),
      aclEntry(DEFAULT, MASK));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** setAcl with only access entries stores the extended entries; the mask is derived from the group entry. */
@Test
public void testSetAclOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ) }, returned);
  assertPermission((short) 010640);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** An explicitly supplied mask entry overrides the automatically recomputed one. */
@Test
public void testModifyAclEntriesCustomMask() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, MASK, NONE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ) }, returned);
  // Mask NONE zeroes the effective group bits: 010600.
  assertPermission((short) 010600);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** Modifying only default entries leaves the access ACL untouched and recomputes the default mask. */
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** Default ACL propagates through an implicitly created intermediate directory to a new file. */
@Test
public void testDefaultAclNewFileIntermediate() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  Path dirPath = new Path(path, "dir1");
  Path filePath = new Path(dirPath, "file1");
  // Creating the file also creates dirPath, which must inherit the default ACL.
  fs.create(filePath).close();
  AclEntry[] expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) };
  AclStatus s = fs.getAclStatus(dirPath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission(dirPath, (short) 010750);
  assertAclFeature(dirPath, true);
  // The file inherits only access entries (files carry no default ACL).
  expected = new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) };
  s = fs.getAclStatus(filePath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission(filePath, (short) 010640);
  assertAclFeature(filePath, true);
}
InternalCallVerifier EqualityVerifier
/** Access-only ACL entries on the parent are not inherited by a newly created child directory. */
@Test
public void testOnlyAccessAclNewDir() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", ALL));
  fs.modifyAclEntries(path, aclSpec);
  Path dirPath = new Path(path, "dir1");
  fs.mkdirs(dirPath);
  AclStatus s = fs.getAclStatus(dirPath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {}, returned);
  assertPermission(dirPath, (short) 0755);
  assertAclFeature(dirPath, false);
}
InternalCallVerifier EqualityVerifier
/** Default ACL inheritance combined with an explicit mkdir mode: the mode filters the inherited entries. */
@Test
public void testDefaultAclNewDirWithMode() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0755));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  Path dirPath = new Path(path, "dir1");
  fs.mkdirs(dirPath, new FsPermission((short) 0740));
  AclStatus s = fs.getAclStatus(dirPath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned);
  assertPermission(dirPath, (short) 010740);
  assertAclFeature(dirPath, true);
}
InternalCallVerifier EqualityVerifier
/** Removing ACL entries preserves the sticky bit in the reported permission. */
@Test
public void testRemoveAclEntriesStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(DEFAULT, USER, "foo"));
  fs.removeAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  // 011750: ACL bit + sticky bit + rwxr-x---.
  assertPermission((short) 011750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** A minimal (3-entry) default ACL is copied verbatim to a new child directory. */
@Test
public void testDefaultMinimalAclNewDir() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  Path dirPath = new Path(path, "dir1");
  fs.mkdirs(dirPath);
  AclStatus s = fs.getAclStatus(dirPath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission(dirPath, (short) 010750);
  assertAclFeature(dirPath, true);
}
InternalCallVerifier EqualityVerifier
/** removeDefaultAcl drops default entries but keeps extended access entries and the ACL bit. */
@Test
public void testRemoveDefaultAcl() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  fs.removeDefaultAcl(path);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
  assertPermission((short) 010770);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** setAcl with only a minimal default ACL stores exactly those three default entries. */
@Test
public void testSetAclMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE));
  fs.setAcl(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** setPermission on a path whose extended ACL is default-only changes mode bits without touching defaults. */
@Test
public void testSetPermissionOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  fs.setPermission(path, FsPermission.createImmutable((short) 0700));
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010700);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** modifyAclEntries updates matching access and default entries and recomputes both masks. */
@Test
public void testModifyAclEntries() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short) 010750);
  assertAclFeature(true);
}
InternalCallVerifier EqualityVerifier
/** removeDefaultAcl preserves the sticky bit in the reported permission. */
@Test
public void testRemoveDefaultAclStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  // List<AclEntry> instead of raw List.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  fs.removeDefaultAcl(path);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
  // 011770: ACL bit + sticky bit + rwxrwx---.
  assertPermission((short) 011770);
  assertAclFeature(true);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests for setting xattr
 * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
 * 2. Set xattr with illegal name.
 * 3. Set xattr without XAttrSetFlag.
 * 4. Set xattr and total number exceeds max limit.
 * 5. Set xattr and name is too long.
 * 6. Set xattr and value is too long.
 */
@Test(timeout = 120000)
public void testSetXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  // Map<String, byte[]> instead of raw Map; assertEquals args reordered to (expected, actual).
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  // A null name is rejected either client-side (NPE) or server-side (RemoteException).
  try {
    fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with null name should fail.");
  } catch (NullPointerException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
  } catch (RemoteException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
  }
  try {
    fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with empty name should fail.");
  } catch (RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
        HadoopIllegalArgumentException.class.getCanonicalName());
    GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
  }
  try {
    fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    Assert.fail("Setting xattr with invalid name prefix or without "
        + "name prefix should fail.");
  } catch (RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
        HadoopIllegalArgumentException.class.getCanonicalName());
    GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
  }
  // Setting without flags succeeds (create-or-replace semantics).
  fs.setXAttr(path, name1, value1);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  // A fourth xattr exceeds the configured per-inode limit.
  fs.setXAttr(path, name1, value1);
  fs.setXAttr(path, name2, value2);
  fs.setXAttr(path, name3, null);
  try {
    fs.setXAttr(path, name4, null);
    Assert.fail("Setting xattr should fail if total number of xattrs "
        + "for inode exceeds max limit.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e);
  }
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  fs.removeXAttr(path, name3);
  // Name alone exceeds the configured max xattr size.
  String longName = "user.0123456789abcdefX";
  try {
    fs.setXAttr(path, longName, null);
    Assert.fail("Setting xattr should fail if name is too long.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("XAttr is too big", e);
    GenericTestUtils.assertExceptionContains("total size is 17", e);
  }
  // Value alone exceeds the configured max xattr size.
  byte[] longValue = new byte[MAX_SIZE];
  try {
    fs.setXAttr(path, "user.a", longValue);
    Assert.fail("Setting xattr should fail if value is too long.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("XAttr is too big", e);
    GenericTestUtils.assertExceptionContains("total size is 17", e);
  }
  // Name plus value exactly at the limit succeeds.
  String name = "user.111";
  byte[] value = new byte[MAX_SIZE - 3];
  fs.setXAttr(path, name, value);
}
InternalCallVerifier EqualityVerifier
/**
 * Steps:
 * 1) Set xattrs on a file.
 * 2) Remove xattrs from that file.
 * 3) Save a checkpoint and restart NN.
 * 4) Set xattrs again on the same file.
 * 5) Remove xattrs from that file.
 * 6) Restart NN without saving a checkpoint.
 * 7) Set xattrs again on the same file.
 */
@Test(timeout = 120000)
public void testCleanupXAttrs() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  // Restart with a saved checkpoint.
  restart(true);
  initFileSystem();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  // Restart without a checkpoint (edit-log replay only).
  restart(false);
  initFileSystem();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  // Map<String, byte[]> instead of raw Map; expected value first in assertEquals.
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
}
InternalCallVerifier EqualityVerifier
/**
 * Tests for removing xattr
 * 1. Remove xattr.
 * 2. Restart NN and save checkpoint scenarios.
 */
@Test(timeout = 120000)
public void testRemoveXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  // A null value is stored as an empty byte array.
  fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  // Map<String, byte[]> instead of raw Map; expected value first in assertEquals.
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // Removal must survive an edit-log-only restart...
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // ...and a checkpointed restart.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  fs.removeXAttr(path, name3);
}
InternalCallVerifier EqualityVerifier
/** XAttrs must follow a file across a rename. */
@Test(timeout = 120000)
public void testRenameFileWithXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  Path renamePath = new Path(path.toString() + "-rename");
  fs.rename(path, renamePath);
  // Map<String, byte[]> instead of raw Map; expected value first in assertEquals.
  Map<String, byte[]> xattrs = fs.getXAttrs(renamePath);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  fs.removeXAttr(renamePath, name1);
  fs.removeXAttr(renamePath, name2);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests for replacing xattr
 * 1. Replace an xattr using XAttrSetFlag.REPLACE.
 * 2. Replace an xattr which doesn't exist and expect an exception.
 * 3. Create multiple xattrs and replace some.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout = 120000)
public void testReplaceXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
  // Map<String, byte[]> instead of raw Map; expected value first in assertEquals.
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  // REPLACE on a missing xattr must fail.
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
    Assert.fail("Replacing xattr which does not exist should fail.");
  } catch (IOException e) {
    // expected
  }
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  // Replacing with null stores an empty value.
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  // Replaced values must survive both restart flavors.
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Tests for creating xattr
 * 1. Create an xattr using XAttrSetFlag.CREATE.
 * 2. Create an xattr which already exists and expect an exception.
 * 3. Create multiple xattrs.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout = 120000)
public void testCreateXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  // Map<String, byte[]> instead of raw Map; expected value first in assertEquals.
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(0, xattrs.size());
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  // CREATE on an existing xattr must fail.
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    Assert.fail("Creating xattr which already exists should fail.");
  } catch (IOException e) {
    // expected
  }
  fs.removeXAttr(path, name1);
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  // A null value is stored as an empty byte array.
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  // Created xattrs must survive both restart flavors.
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Raw-namespace xattrs: usable via the raw path by the superuser, denied to ordinary users. */
@Test(timeout = 120000)
public void testRawXAttrs() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  {
    // getXAttr on the raw path returns the stored value.
    // (assertArrayEquals args reordered: expected first.)
    final byte[] value = fs.getXAttr(rawPath, raw1);
    Assert.assertArrayEquals(value1, value);
  }
  {
    // Map<String, byte[]> instead of raw Map; expected value first in assertEquals.
    final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
    Assert.assertEquals(1, xattrs.size());
    Assert.assertArrayEquals(value1, xattrs.get(raw1));
    fs.removeXAttr(rawPath, raw1);
  }
  {
    // CREATE then CREATE|REPLACE acts as a replace.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw1, newValue1, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
    Assert.assertEquals(1, xattrs.size());
    Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
    fs.removeXAttr(rawPath, raw1);
  }
  {
    // Raw xattrs are listed through the raw path...
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    final List<String> xattrNames = fs.listXAttrs(rawPath);
    assertTrue(xattrNames.contains(raw1));
    assertTrue(xattrNames.contains(raw2));
    assertEquals(2, xattrNames.size());
    fs.removeXAttr(rawPath, raw1);
    fs.removeXAttr(rawPath, raw2);
  }
  {
    // ...but never through the non-raw path.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    final List<String> xattrNames = fs.listXAttrs(path);
    assertEquals(0, xattrNames.size());
    fs.removeXAttr(rawPath, raw1);
    fs.removeXAttr(rawPath, raw2);
  }
  {
    // A non-superuser can neither set nor get raw xattrs through either path.
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        try {
          userFs.setXAttr(path, raw1, value1);
          fail("setXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.setXAttr(rawPath, raw1, value1);
          fail("setXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttrs(rawPath);
          fail("getXAttrs should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttrs(path);
          fail("getXAttrs should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(rawPath, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(path, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        return null;
      }
    });
  }
  {
    // Even with a raw xattr present, a non-superuser cannot read or list it.
    fs.setXAttr(rawPath, raw1, value1);
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        try {
          userFs.getXAttr(rawPath, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(path, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        final List<String> xattrNames = userFs.listXAttrs(path);
        assertEquals(0, xattrNames.size());
        try {
          userFs.listXAttrs(rawPath);
          fail("listXAttrs on raw path should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        return null;
      }
    });
    fs.removeXAttr(rawPath, raw1);
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the listXAttrs api.
 * listXAttrs on a path that doesn't exist.
 * listXAttrs on a path with no XAttrs
 * Check basic functionality.
 * Check that read access to parent dir is not enough to get xattr names
 * Check that write access to the parent dir is not enough to get names
 * Check that execute/scan access to the parent dir is sufficient to get
 * xattr names.
 */
@Test(timeout = 120000)
public void testListXAttrs() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  try {
    fs.listXAttrs(path);
    fail("expected FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("cannot find", e);
  }
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  // List<String> instead of raw List; size checks use assertEquals for better failures.
  final List<String> noXAttrs = fs.listXAttrs(path);
  assertEquals("XAttrs were found?", 0, noXAttrs.size());
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  final List<String> xattrNames = fs.listXAttrs(path);
  assertTrue(xattrNames.contains(name1));
  assertTrue(xattrNames.contains(name2));
  assertEquals(2, xattrNames.size());
  // Read-only (4) access on the parent is not enough to list a child's xattrs.
  fs.setPermission(path, new FsPermission((short) 0704));
  final Path childDir = new Path(path, "child" + pathCount);
  FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
  fs.setXAttr(childDir, name1, "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.listXAttrs(childDir);
        return null;
      }
    });
    fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  // Write-only (2) access on the parent is not enough either.
  fs.setPermission(path, new FsPermission((short) 0702));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.listXAttrs(childDir);
        return null;
      }
    });
    fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  // Execute/scan (1) access on the parent is sufficient.
  fs.setPermission(path, new FsPermission((short) 0701));
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      userFs.listXAttrs(childDir);
      return null;
    }
  });
  // A trusted-namespace xattr is hidden from the ordinary user but visible to the superuser.
  fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      assertEquals(1, userFs.listXAttrs(childDir).size());
      return null;
    }
  });
  assertEquals(2, fs.listXAttrs(childDir).size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** XAttr operations are gated by ACLs: read access allows getXAttrs, write access allows set/remove. */
@Test(timeout = 120000)
public void testXAttrAcl() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setOwner(path, BRUCE.getUserName(), null);
  FileSystem fsAsBruce = createFileSystem(BRUCE);
  FileSystem fsAsDiana = createFileSystem(DIANA);
  fsAsBruce.setXAttr(path, name1, value1);
  // Map<String, byte[]> instead of raw Map.
  Map<String, byte[]> xattrs;
  try {
    xattrs = fsAsDiana.getXAttrs(path);
    Assert.fail("Diana should not have read access to get xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  // Grant Diana read access; getXAttrs now succeeds.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
  xattrs = fsAsDiana.getXAttrs(path);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  try {
    fsAsDiana.removeXAttr(path, name1);
    Assert.fail("Diana should not have write access to remove xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  try {
    fsAsDiana.setXAttr(path, name2, value2);
    Assert.fail("Diana should not have write access to set xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  // Grant Diana full access; set/remove now succeed.
  fsAsBruce.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
  fsAsDiana.setXAttr(path, name2, value2);
  Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
  fsAsDiana.removeXAttr(path, name1);
  fsAsDiana.removeXAttr(path, name2);
}
InternalCallVerifier EqualityVerifier
/**
 * Test adding new blocks. Restart the NameNode in the test to make sure the
 * AddBlockOp in the editlog is applied correctly.
 */
@Test
public void testAddBlock() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  // Four files straddling the block-size boundary: one byte short of a block,
  // exactly one block, one byte short of two blocks, exactly two blocks.
  final Path[] files = {
      new Path("/file1"), new Path("/file2"), new Path("/file3"), new Path("/file4") };
  final long[] lengths = {
      BLOCKSIZE - 1, BLOCKSIZE, BLOCKSIZE * 2 - 1, BLOCKSIZE * 2 };
  for (int i = 0; i < files.length; i++) {
    DFSTestUtil.createFile(fs, files[i], lengths[i], REPLICATION, 0L);
  }
  // Restart so the block list is rebuilt from the persisted edit log.
  cluster.restartNameNode(true);
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  // Per-file expected block sizes; every block must be COMPLETE.
  final long[][] expectedBlockSizes = {
      { BLOCKSIZE - 1 },
      { BLOCKSIZE },
      { BLOCKSIZE, BLOCKSIZE - 1 },
      { BLOCKSIZE, BLOCKSIZE } };
  for (int i = 0; i < files.length; i++) {
    INodeFile fileNode = fsdir.getINode4Write(files[i].toString()).asFile();
    BlockInfo[] blocks = fileNode.getBlocks();
    assertEquals(expectedBlockSizes[i].length, blocks.length);
    for (int j = 0; j < blocks.length; j++) {
      assertEquals(expectedBlockSizes[i][j], blocks[j].getNumBytes());
      assertEquals(BlockUCState.COMPLETE, blocks[j].getBlockUCState());
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test adding new blocks but without closing the corresponding the file
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  // try-with-resources replaces the manual null-check/finally close.
  try (FSDataOutputStream out = fs.append(file1)) {
    String appendContent = "appending-content";
    out.writeBytes(appendContent);
    // Persist the new length on the NameNode without closing the file.
    ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfo[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    // First block is filled to BLOCKSIZE and COMPLETE.
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    // Second block holds the remainder of the append and is still open.
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, fileBlocks[1].getBlockUCState());
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A retried addBlock() after a NameNode restart must return the same block
 * as the first call, and both responses must carry datanode locations.
 */
@Test public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
  final String src="/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc=cluster.getNameNodeRpc();
  // Restored the EnumSetWritable type parameter (was a raw type).
  nameNodeRpc.create(src,FsPermission.getFileDefault(),"clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null);
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
  assertTrue("Block locations should be present",lb1.getLocations().length > 0);
  // Restart to force the second addBlock to be treated as a retry of the first.
  cluster.restartNameNode();
  nameNodeRpc=cluster.getNameNodeRpc();
  LocatedBlock lb2=nameNodeRpc.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
  // The retry must not allocate a fresh block.
  assertEquals("Blocks are not equal",lb1.getBlock(),lb2.getBlock());
  assertTrue("Wrong locations with retry",lb2.getLocations().length > 0);
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
* Retry addBlock() while another thread is in chooseTarget().
* See HDFS-4452.
*/
@Test public void testRetryAddBlockWhileInChooseTarget() throws Exception {
final String src="/testRetryAddBlockWhileInChooseTarget";
FSNamesystem ns=cluster.getNamesystem();
BlockManager spyBM=spy(ns.getBlockManager());
final NamenodeProtocols nn=cluster.getNameNodeRpc();
Class extends FSNamesystem> nsClass=ns.getClass();
Field bmField=nsClass.getDeclaredField("blockManager");
bmField.setAccessible(true);
bmField.set(ns,spyBM);
doAnswer(new Answer(){
@Override public DatanodeStorageInfo[] answer( InvocationOnMock invocation) throws Throwable {
LOG.info("chooseTarget for " + src);
DatanodeStorageInfo[] ret=(DatanodeStorageInfo[])invocation.callRealMethod();
count++;
if (count == 1) {
LOG.info("Starting second addBlock for " + src);
nn.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
LocatedBlocks lbs=nn.getBlockLocations(src,0,Long.MAX_VALUE);
assertEquals("Must be one block",1,lbs.getLocatedBlocks().size());
lb2=lbs.get(0);
assertEquals("Wrong replication",REPLICATION,lb2.getLocations().length);
}
return ret;
}
}
).when(spyBM).chooseTarget(Mockito.anyString(),Mockito.anyInt(),Mockito.any(),Mockito.>any(),Mockito.anyLong(),Mockito.>any());
nn.create(src,FsPermission.getFileDefault(),"clientName",new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null);
LOG.info("Starting first addBlock for " + src);
nn.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null);
LocatedBlocks lbs=nn.getBlockLocations(src,0,Long.MAX_VALUE);
assertEquals("Must be one block",1,lbs.getLocatedBlocks().size());
lb1=lbs.get(0);
assertEquals("Wrong replication",REPLICATION,lb1.getLocations().length);
assertEquals("Blocks are not equal",lb1.getBlock(),lb2.getBlock());
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
* start MiniDFScluster, try formatting with different settings
* @throws IOException
* @throws InterruptedException
*/
@Test public void testAllowFormat() throws IOException {
LOG.info("--starting mini cluster");
NameNode nn;
config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,true);
cluster=new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build();
cluster.waitActive();
assertNotNull(cluster);
nn=cluster.getNameNode();
assertNotNull(nn);
LOG.info("Mini cluster created OK");
LOG.info("Verifying format will fail with allowformat false");
config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,false);
try {
cluster.shutdown();
NameNode.format(config);
fail("Format succeeded, when it should have failed");
}
catch ( IOException e) {
assertTrue("Exception was not about formatting Namenode",e.getMessage().startsWith("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
LOG.info("Expected failure: " + StringUtils.stringifyException(e));
LOG.info("Done verifying format will fail with allowformat false");
}
LOG.info("Verifying format will succeed with allowformat true");
config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,true);
NameNode.format(config);
LOG.info("Done verifying format will succeed with allowformat true");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that the audit logger records the correct remote address for
 * WebHDFS requests: the direct peer address normally, and the
 * X-Forwarded-For address only when the peer is a configured proxy server.
 */
@Test public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
  Configuration conf=new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,DummyAuditLogger.class.getName());
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  GetOpParam.Op op=GetOpParam.Op.GETFILESTATUS;
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    URI uri=new URI("http",NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),"/webhdfs/v1/",op.toQueryString(),null);
    // 1) Plain request: the audit log records the direct peer address.
    assertEquals(200,issueWebHdfsRequest(uri,op.getType().toString(),null));
    assertEquals(1,DummyAuditLogger.logCount);
    assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr);
    // 2) X-Forwarded-For from a non-proxy peer must be ignored.
    assertEquals(200,issueWebHdfsRequest(uri,op.getType().toString(),"1.1.1.1"));
    assertEquals(2,DummyAuditLogger.logCount);
    assertEquals("127.0.0.1",DummyAuditLogger.remoteAddr);
    // 3) Register 127.0.0.1 as a trusted proxy; now the forwarded
    // address is what lands in the audit log.
    conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS,"127.0.0.1");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    assertEquals(200,issueWebHdfsRequest(uri,op.getType().toString(),"1.1.1.1"));
    assertEquals(3,DummyAuditLogger.logCount);
    assertEquals("1.1.1.1",DummyAuditLogger.remoteAddr);
  }
  finally {
    cluster.shutdown();
  }
}
/**
 * Issues one HTTP request against the given URI and returns the response
 * code. Extracted from the three duplicated request/assert sequences above.
 * @param uri target WebHDFS URI
 * @param method HTTP method to use
 * @param forwardedFor value for the X-Forwarded-For header, or null to omit it
 */
private static int issueWebHdfsRequest(URI uri,String method,String forwardedFor) throws IOException {
  HttpURLConnection conn=(HttpURLConnection)uri.toURL().openConnection();
  conn.setRequestMethod(method);
  if (forwardedFor != null) {
    conn.setRequestProperty("X-Forwarded-For",forwardedFor);
  }
  conn.connect();
  int status=conn.getResponseCode();
  conn.disconnect();
  return status;
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
/**
 * test that denied access via webhdfs puts proper entry in audit log
 */
@Test public void testAuditWebHdfsDenied() throws Exception {
  final Path file=new Path(fnames[0]);
  // Make the file unreadable by the unprivileged test user.
  fs.setPermission(file,new FsPermission((short)0600));
  fs.setOwner(file,"root",null);
  setupAuditLogs();
  try {
    WebHdfsFileSystem webfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,conf,WebHdfsFileSystem.SCHEME);
    InputStream in=webfs.open(file);
    int firstByte=in.read();
    fail("open+read must not succeed, got " + firstByte);
  }
  catch ( AccessControlException expected) {
    // The denial is exactly what this test wants to observe.
    System.out.println("got access denied, as expected.");
  }
  // Denied WebHDFS access produces two audit entries.
  verifyAuditLogsRepeat(false,2);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that allowed stat puts proper entry in audit log
 */
@Test public void testAuditAllowedStat() throws Exception {
  final Path file=new Path(fnames[0]);
  // Act as the unprivileged test user so the audit entry is attributed to it.
  FileSystem userFs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
  setupAuditLogs();
  FileStatus status=userFs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file",status != null && status.isFile());
}
TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier
// Per-test fixture: brings up a 4-datanode cluster with audit logging
// configured (sync or async per the useAsyncLog parameter), seeds it with
// test files, and prepares the unprivileged test user.
@Before public void setupCluster() throws Exception {
configureAuditLogs();
conf=new HdfsConfiguration();
// 1ms access-time precision so access-time updates show up immediately.
final long precision=1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,useAsyncLog);
util=new DFSTestUtil.Builder().setName("TestAuditAllowed").setNumFiles(20).build();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs=cluster.getFileSystem();
util.createFiles(fs,fileName);
// Sanity-check that exactly one appender is attached and that its type
// (async vs. plain) matches the requested audit-log mode.
Logger logger=((Log4JLogger)FSNamesystem.auditLog).getLogger();
@SuppressWarnings("unchecked") List appenders=Collections.list(logger.getAllAppenders());
assertEquals(1,appenders.size());
assertEquals(useAsyncLog,appenders.get(0) instanceof AsyncAppender);
fnames=util.getFileNames(fileName);
util.waitReplication(fs,fileName,(short)3);
userGroupInfo=UserGroupInformation.createUserForTesting(username,groups);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that stat via webhdfs puts proper entry in audit log
 */
@Test public void testAuditWebHdfsStat() throws Exception {
  final Path file=new Path(fnames[0]);
  // World-readable so the WebHDFS stat from the test user is permitted.
  fs.setPermission(file,new FsPermission((short)0644));
  fs.setOwner(file,"root",null);
  setupAuditLogs();
  WebHdfsFileSystem webfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,conf,WebHdfsFileSystem.SCHEME);
  FileStatus status=webfs.getFileStatus(file);
  verifyAuditLogs(true);
  assertTrue("failed to stat file",status != null && status.isFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that access via webhdfs puts proper entry in audit log
 */
@Test public void testAuditWebHdfs() throws Exception {
  final Path file=new Path(fnames[0]);
  fs.setPermission(file,new FsPermission((short)0644));
  fs.setOwner(file,"root",null);
  setupAuditLogs();
  WebHdfsFileSystem webfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo,conf,WebHdfsFileSystem.SCHEME);
  final int val;
  // try-with-resources: the original leaked the stream if read() threw.
  try (InputStream istream=webfs.open(file)) {
    val=istream.read();
  }
  // A WebHDFS open+read produces three audit entries.
  verifyAuditLogsRepeat(true,3);
  assertTrue("failed to read from file",val >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test that allowed operation puts proper entry in audit log
 */
@Test public void testAuditAllowed() throws Exception {
  final Path file=new Path(fnames[0]);
  FileSystem userfs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
  setupAuditLogs();
  final int val;
  // try-with-resources: the original leaked the stream if read() threw.
  try (InputStream istream=userfs.open(file)) {
    val=istream.read();
  }
  verifyAuditLogs(true);
  assertTrue("failed to read from file",val >= 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Ensure that the backupnode will tail edits from the NN
* and keep in sync, even while the NN rolls, checkpoints
* occur, etc.
*/
@Test public void testBackupNodeTailsEdits() throws Exception {
Configuration conf=new HdfsConfiguration();
HAUtil.setAllowStandbyReads(conf,true);
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
BackupNode backup=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
fileSys=cluster.getFileSystem();
backup=startBackupNode(conf,StartupOption.BACKUP,1);
BackupImage bnImage=(BackupImage)backup.getFSImage();
testBNInSync(cluster,backup,1);
NameNode nn=cluster.getNameNode();
NamenodeProtocols nnRpc=nn.getRpcServer();
nnRpc.rollEditLog();
assertEquals(bnImage.getEditLog().getCurSegmentTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId());
testBNInSync(cluster,backup,2);
long nnImageBefore=nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
backup.doCheckpoint();
long nnImageAfter=nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
assertTrue("nn should have received new checkpoint. before: " + nnImageBefore + " after: "+ nnImageAfter,nnImageAfter > nnImageBefore);
testBNInSync(cluster,backup,3);
StorageDirectory sd=bnImage.getStorage().getStorageDir(0);
backup.stop();
backup=null;
EditLogFile editsLog=FSImageTestUtil.findLatestEditsLog(sd);
assertEquals(editsLog.getFirstTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId());
assertTrue("Should not have finalized " + editsLog,editsLog.isInProgress());
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
backup=startBackupNode(conf,StartupOption.BACKUP,1);
testBNInSync(cluster,backup,4);
assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down",false));
backup.stop(false);
assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2")));
}
finally {
LOG.info("Shutting down...");
if (backup != null) backup.stop();
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
assertStorageDirsMatch(cluster.getNameNode(),backup);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Verify that a file can be read both from NameNode and BackupNode.
*/
@Test public void testCanReadData() throws IOException {
Path file1=new Path("/fileToRead.dat");
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
BackupNode backup=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fileSys=cluster.getFileSystem();
long txid=cluster.getNameNodeRpc().getTransactionID();
backup=startBackupNode(conf,StartupOption.BACKUP,1);
waitCheckpointDone(cluster,txid);
String rpcAddrKeyPreffix=DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
String nnAddr=cluster.getNameNode().getNameNodeAddressHostPortString();
conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
String bnAddr=backup.getNameNodeAddressHostPortString();
conf.set(DFSConfigKeys.DFS_NAMESERVICES,"bnCluster");
conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID,"bnCluster");
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster","nnActive, nnBackup");
conf.set(rpcAddrKeyPreffix + ".nnActive",nnAddr);
conf.set(rpcAddrKeyPreffix + ".nnBackup",bnAddr);
cluster.startDataNodes(conf,3,true,StartupOption.REGULAR,null);
DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)3,seed);
FileSystem bnFS=FileSystem.get(new Path("hdfs://" + bnAddr).toUri(),conf);
String nnData=DFSTestUtil.readFile(fileSys,file1);
String bnData=DFSTestUtil.readFile(bnFS,file1);
assertEquals("Data read from BackupNode and NameNode is not the same.",nnData,bnData);
}
catch ( IOException e) {
LOG.error("Error in TestBackupNode: ",e);
assertTrue(e.getLocalizedMessage(),false);
}
finally {
if (fileSys != null) fileSys.close();
if (backup != null) backup.stop();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test NameNode.getBlockLocations(..) on reading un-closed files.
*/
@Test public void testGetBlockLocations() throws IOException {
final NamenodeProtocols namenode=cluster.getNameNodeRpc();
final Path p=new Path(BASE_DIR,"file2.dat");
final String src=p.toString();
final FSDataOutputStream out=TestFileCreation.createFile(hdfs,p,3);
int len=BLOCK_SIZE >>> 1;
writeFile(p,out,len);
for (int i=1; i < NUM_BLOCKS; ) {
final LocatedBlocks lb=namenode.getBlockLocations(src,0,len);
final List blocks=lb.getLocatedBlocks();
assertEquals(i,blocks.size());
final Block b=blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
assertTrue(b instanceof BlockInfoUnderConstruction);
if (++i < NUM_BLOCKS) {
writeFile(p,out,BLOCK_SIZE);
len+=BLOCK_SIZE;
}
}
out.close();
}
InternalCallVerifier EqualityVerifier
/**
 * End-to-end caching test: waits for cache capacity to register, submits a
 * bogus cache report, adds directives for several files, verifies per-node
 * cache accounting, then removes the directives and waits for uncaching.
 * Restores the generic type parameters that were stripped (the raw
 * RemoteIterator made "entries.next()" fail to compile).
 */
@Test(timeout=120000) public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper=new FileSystemTestHelper();
  // Wait until all datanodes have registered their cache capacity.
  GenericTestUtils.waitFor(new Supplier<Boolean>(){
    @Override public Boolean get(){
      return ((namenode.getNamesystem().getCacheCapacity() == (NUM_DATANODES * CACHE_CAPACITY)) && (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }
  ,500,60000);
  NamenodeProtocols nnRpc=namenode.getRpcServer();
  DataNode dn0=cluster.getDataNodes().get(0);
  String bpid=cluster.getNamesystem().getBlockPoolId();
  // A cache report with a non-existent block id must be tolerated.
  LinkedList<Long> bogusBlockIds=new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid),bpid,bogusBlockIds);
  Path rootDir=helper.getDefaultWorkingDirectory(dfs);
  final String pool="friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  final int numFiles=2;
  final int numBlocksPerFile=2;
  final List<String> paths=new ArrayList<String>(numFiles);
  for (int i=0; i < numFiles; i++) {
    Path p=new Path(rootDir,"testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs,p,numBlocksPerFile,(int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  waitForCachedBlocks(namenode,0,0,"testWaitForCachedReplicas:0");
  // Cache each file in turn and wait for its blocks to be cached.
  int expected=0;
  for (int i=0; i < numFiles; i++) {
    CacheDirectiveInfo directive=new CacheDirectiveInfo.Builder().setPath(new Path(paths.get(i))).setPool(pool).build();
    nnRpc.addCacheDirective(directive,EnumSet.noneOf(CacheFlag.class));
    expected+=numBlocksPerFile;
    waitForCachedBlocks(namenode,expected,expected,"testWaitForCachedReplicas:1");
  }
  // Per-node accounting must be internally consistent and sum to the total.
  DatanodeInfo[] live=dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes",NUM_DATANODES,live.length);
  long totalUsed=0;
  for ( DatanodeInfo dn : live) {
    final long cacheCapacity=dn.getCacheCapacity();
    final long cacheUsed=dn.getCacheUsed();
    final long cacheRemaining=dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity",CACHE_CAPACITY,cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",cacheCapacity,cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",cacheCapacity - cacheUsed,cacheRemaining);
    totalUsed+=cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE,totalUsed);
  // Remove each directive and wait for the blocks to be uncached again.
  RemoteIterator<CacheDirectiveEntry> entries=new CacheDirectiveIterator(nnRpc,null);
  for (int i=0; i < numFiles; i++) {
    CacheDirectiveEntry entry=entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected-=numBlocksPerFile;
    waitForCachedBlocks(namenode,expected,expected,"testWaitForCachedReplicas:2");
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Creates a cache pool, modifies its attributes, removes it, and verifies
 * each step through listCachePools; also checks that removing a
 * non-existent pool fails. Restores the RemoteIterator type parameter
 * (the raw iterator made "iter.next().getInfo()" fail to compile).
 */
@Test(timeout=60000) public void testCreateAndModifyPools() throws Exception {
  String poolName="pool1";
  String ownerName="abc";
  String groupName="123";
  FsPermission mode=new FsPermission((short)0755);
  long limit=150;
  dfs.addCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
  RemoteIterator<CachePoolEntry> iter=dfs.listCachePools();
  CachePoolInfo info=iter.next().getInfo();
  assertEquals(poolName,info.getPoolName());
  assertEquals(ownerName,info.getOwnerName());
  assertEquals(groupName,info.getGroupName());
  // Modify every attribute and verify the listing reflects the new values.
  ownerName="def";
  groupName="456";
  mode=new FsPermission((short)0700);
  limit=151;
  dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(ownerName).setGroupName(groupName).setMode(mode).setLimit(limit));
  iter=dfs.listCachePools();
  info=iter.next().getInfo();
  assertEquals(poolName,info.getPoolName());
  assertEquals(ownerName,info.getOwnerName());
  assertEquals(groupName,info.getGroupName());
  assertEquals(mode,info.getMode());
  assertEquals(limit,(long)info.getLimit());
  dfs.removeCachePool(poolName);
  iter=dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool",iter.hasNext());
  proto.listCachePools(null);
  // Removing a pool that never existed must fail.
  try {
    proto.removeCachePool("pool99");
    fail("expected to get an exception when " + "removing a non-existent pool.");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe);
  }
  // Removing the already-deleted pool must fail the same way.
  try {
    proto.removeCachePool(poolName);
    fail("expected to get an exception when " + "removing a non-existent pool.");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent",ioe);
  }
  iter=dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool",iter.hasNext());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testMaxRelativeExpiry() throws Exception {
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative",e);
}
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
fail("Added a pool with too big of a max expiry.");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("too big",e);
}
CachePoolInfo coolPool=new CachePoolInfo("coolPool");
final long poolExpiration=1000 * 60 * 10l;
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
RemoteIterator poolIt=dfs.listCachePools();
CachePoolInfo listPool=poolIt.next().getInfo();
assertFalse("Should only be one pool",poolIt.hasNext());
assertEquals("Expected max relative expiry to match set value",poolExpiration,listPool.getMaxRelativeExpiryMs().longValue());
try {
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
}
catch ( InvalidRequestException e) {
assertExceptionContains("negative",e);
}
try {
dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
fail("Added a pool with too big of a max expiry.");
}
catch ( InvalidRequestException e) {
assertExceptionContains("too big",e);
}
CacheDirectiveInfo defaultExpiry=new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build();
dfs.addCacheDirective(defaultExpiry);
RemoteIterator dirIt=dfs.listCacheDirectives(defaultExpiry);
CacheDirectiveInfo listInfo=dirIt.next().getInfo();
assertFalse("Should only have one entry in listing",dirIt.hasNext());
long listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Directive expiry should be approximately the pool's max expiry",Math.abs(listExpiration - poolExpiration) < 10 * 1000);
CacheDirectiveInfo.Builder builder=new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Added a directive that exceeds pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Added a directive that exceeds pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
}
catch ( IllegalArgumentException e) {
assertExceptionContains("is too far in the future",e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
}
catch ( InvalidRequestException e) {
assertExceptionContains("is too far in the future",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Modified a directive to exceed pool's max relative expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("is too far in the future",e);
}
CachePoolInfo destPool=new CachePoolInfo("destPool");
dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build());
fail("Modified a directive to a pool with a lower max expiration");
}
catch ( InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration",e);
}
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build());
dirIt=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
listInfo=dirIt.next().getInfo();
listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately "+ poolExpiration / 2,Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
poolIt=dfs.listCachePools();
listPool=poolIt.next().getInfo();
while (!listPool.getPoolName().equals(destPool.getPoolName())) {
listPool=poolIt.next().getInfo();
}
assertEquals("Expected max relative expiry to match set value",CachePoolInfo.RELATIVE_EXPIRY_NEVER,listPool.getMaxRelativeExpiryMs().longValue());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that directive expiration drives uncaching: an expired directive
 * uncaches its blocks, extending the expiration re-caches them, and a
 * negative relative expiration is rejected. Restores the RemoteIterator
 * type parameter (the raw iterator made "it.next()" fail to compile).
 */
@Test(timeout=120000) public void testExpiry() throws Exception {
  String pool="pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p=new Path("/mypath");
  DFSTestUtil.createFile(dfs,p,BLOCK_SIZE * 2,(short)2,0x999);
  Date start=new Date();
  Date expiry=DateUtils.addSeconds(start,120);
  final long id=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(p).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry)).setReplication((short)2).build());
  waitForCachedBlocks(cluster.getNameNode(),2,4,"testExpiry:1");
  // Expire the directive immediately; all cached blocks must be released.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(0)).build());
  waitForCachedBlocks(cluster.getNameNode(),0,0,"testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it=dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent=it.next();
  assertFalse(it.hasNext());
  Date entryExpiry=new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired",entryExpiry.before(new Date()));
  // Push the expiration back into the future; the blocks must be re-cached.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(120000)).build());
  waitForCachedBlocks(cluster.getNameNode(),2,4,"testExpiry:3");
  it=dfs.listCacheDirectives(null);
  ent=it.next();
  assertFalse(it.hasNext());
  entryExpiry=new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired",entryExpiry.after(new Date()));
  // NOTE(review): no fail() inside this try — if the negative expiration is
  // ever accepted the test passes silently; confirm against upstream intent.
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setExpiration(Expiration.newRelative(-1)).build());
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Cannot set a negative expiration",e);
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=120000) public void testLimit() throws Exception {
try {
dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
fail("Should not be able to set a negative limit");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative",e);
}
final String destiny="poolofdestiny";
final Path path1=new Path("/destiny");
DFSTestUtil.createFile(dfs,path1,2 * BLOCK_SIZE,(short)1,0x9494);
final CachePoolInfo poolInfo=new CachePoolInfo(destiny).setLimit(2 * BLOCK_SIZE - 1);
dfs.addCachePool(poolInfo);
final CacheDirectiveInfo info1=new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path1).build();
try {
dfs.addCacheDirective(info1);
fail("Should not be able to cache when there is no more limit");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("remaining capacity",e);
}
poolInfo.setLimit(2 * BLOCK_SIZE);
dfs.modifyCachePool(poolInfo);
long id1=dfs.addCacheDirective(info1);
waitForCachePoolStats(dfs,2 * BLOCK_SIZE,2 * BLOCK_SIZE,1,1,poolInfo,"testLimit:1");
final Path path2=new Path("/failure");
DFSTestUtil.createFile(dfs,path2,BLOCK_SIZE,(short)1,0x9495);
try {
dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(destiny).setPath(path2).build(),EnumSet.noneOf(CacheFlag.class));
fail("Should not be able to add another cached file");
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("remaining capacity",e);
}
poolInfo.setLimit(BLOCK_SIZE);
dfs.modifyCachePool(poolInfo);
waitForCachePoolStats(dfs,2 * BLOCK_SIZE,0,1,0,poolInfo,"testLimit:2");
RemoteIterator it=dfs.listCachePools();
assertTrue("Expected a cache pool",it.hasNext());
CachePoolStats stats=it.next().getStats();
assertEquals("Overlimit bytes should be difference of needed and limit",BLOCK_SIZE,stats.getBytesOverlimit());
CachePoolInfo inadequate=new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
dfs.addCachePool(inadequate);
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(),EnumSet.noneOf(CacheFlag.class));
}
catch ( InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("remaining capacity",e);
}
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1).setId(id1).setPool(inadequate.getPoolName()).build(),EnumSet.of(CacheFlag.FORCE));
dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName()).setPath(path1).build(),EnumSet.of(CacheFlag.FORCE));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that cache pools and directives survive checkpointing and a
 * NameNode restart, and that directive id allocation continues from where
 * it left off. Restores the RemoteIterator type parameters (the raw
 * iterators made the ".next().getInfo()" chains fail to compile) and drops
 * a duplicated pool-name assertion.
 */
@Test(timeout=60000) public void testCacheManagerRestart() throws Exception {
  SecondaryNameNode secondary=null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
    secondary=new SecondaryNameNode(conf);
    // Create a pool and verify every attribute through the listing.
    final String pool="poolparty";
    String groupName="partygroup";
    FsPermission mode=new FsPermission((short)0777);
    long limit=747;
    dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit));
    RemoteIterator<CachePoolEntry> pit=dfs.listCachePools();
    assertTrue("No cache pools found",pit.hasNext());
    CachePoolInfo info=pit.next().getInfo();
    assertEquals(pool,info.getPoolName());
    assertEquals(groupName,info.getGroupName());
    assertEquals(mode,info.getMode());
    assertEquals(limit,(long)info.getLimit());
    assertFalse("Unexpected # of cache pools found",pit.hasNext());
    // Add a batch of directives with a fixed absolute expiration.
    int numEntries=10;
    String entryPrefix="/party-";
    long prevId=-1;
    final Date expiry=new Date();
    for (int i=0; i < numEntries; i++) {
      prevId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build());
    }
    RemoteIterator<CacheDirectiveEntry> dit=dfs.listCacheDirectives(null);
    for (int i=0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i,dit.hasNext());
      CacheDirectiveInfo cd=dit.next().getInfo();
      assertEquals(i + 1,cd.getId().longValue());
      assertEquals(entryPrefix + i,cd.getPath().toUri().getPath());
      assertEquals(pool,cd.getPool());
    }
    assertFalse("Unexpected # of cache directives found",dit.hasNext());
    secondary.doCheckpoint();
    // Force a new fsimage containing an extra pool, have the 2NN fetch it,
    // then remove that pool before the restart.
    final String imagePool="imagePool";
    dfs.addCachePool(new CachePoolInfo(imagePool));
    prevId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build());
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    boolean fetchImage=secondary.doCheckpoint();
    assertTrue("Secondary should have fetched a new fsimage from NameNode",fetchImage);
    dfs.removeCachePool(imagePool);
    cluster.restartNameNode();
    // After restart, the original pool and all directives must be intact.
    pit=dfs.listCachePools();
    assertTrue("No cache pools found",pit.hasNext());
    info=pit.next().getInfo();
    assertEquals(pool,info.getPoolName());
    assertEquals(groupName,info.getGroupName());
    assertEquals(mode,info.getMode());
    assertEquals(limit,(long)info.getLimit());
    assertFalse("Unexpected # of cache pools found",pit.hasNext());
    dit=dfs.listCacheDirectives(null);
    for (int i=0; i < numEntries; i++) {
      assertTrue("Unexpected # of cache entries: " + i,dit.hasNext());
      CacheDirectiveInfo cd=dit.next().getInfo();
      assertEquals(i + 1,cd.getId().longValue());
      assertEquals(entryPrefix + i,cd.getPath().toUri().getPath());
      assertEquals(pool,cd.getPool());
      assertEquals(expiry.getTime(),cd.getExpiration().getMillis());
    }
    assertFalse("Unexpected # of cache directives found",dit.hasNext());
    // Directive id allocation must continue monotonically across restart.
    long nextId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build());
    assertEquals(prevId + 1,nextId);
  }
  finally {
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the full cache-directive lifecycle: adding (including re-adding
 * identical info, which must yield a distinct ID), listing with and without
 * pool/ID filters, modifying, and removing — plus the error paths for
 * unknown pools, zero-permission pools, malformed paths, empty pool names,
 * and negative or non-existent directive IDs.
 */
@Test(timeout=60000) public void testAddRemoveDirectives() throws Exception {
  // Three permissive pools and one with mode 0 (no permissions for anyone).
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short)0)));
  CacheDirectiveInfo alpha=new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta=new CacheDirectiveInfo.Builder().setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta=new CacheDirectiveInfo.Builder().setPath(new Path("/delta")).setPool("pool1").build();
  // Re-adding the same info must produce a new, distinct directive ID.
  long alphaId=addAsUnprivileged(alpha);
  long alphaId2=addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an " + "existing CacheDirectiveInfo",alphaId == alphaId2);
  long betaId=addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  }
  catch ( InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool",ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone).");
  }
  catch ( AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool",e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path " + "to the cache directives.");
  }
  catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename",e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/emptypoolname")).setReplication((short)1).setPool("").build());
    fail("expected an error when adding a cache " + "directive with an empty pool name.");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name",e);
  }
  long deltaId=addAsUnprivileged(delta);
  // Relative paths are accepted too.
  long relativeId=addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("relative")).setPool("pool1").build());
  // NOTE(review): element type restored — listCacheDirectives yields
  // CacheDirectiveEntry (see .next().getInfo() below); the raw type
  // cannot compile.
  RemoteIterator<CacheDirectiveEntry> iter;
  // Unfiltered listing returns every directive.
  iter=dfs.listCacheDirectives(null);
  validateListAll(iter,alphaId,alphaId2,betaId,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter,alphaId,alphaId2,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter,betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter,alphaId2);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter,relativeId);
  dfs.removeCacheDirective(betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  // Double-remove, negative ID and unknown ID must all be rejected.
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  try {
    proto.removeCacheDirective(-42L); // uppercase L: '-42l' reads as '-421'
    fail("expected an error when removing a negative ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID",e);
  }
  try {
    proto.removeCacheDirective(43L);
    fail("expected an error when removing a non-existent ID");
  }
  catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify the sole remaining directive and verify the change is listed.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(relativeId).setReplication((short)555).build());
  iter=dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified=iter.next().getInfo();
  assertEquals(relativeId,modified.getId().longValue());
  assertEquals((short)555,modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter=dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // Add/modify/remove with a "." path (regression coverage).
  CacheDirectiveInfo directive=new CacheDirectiveInfo.Builder().setPath(new Path(".")).setPool("pool1").build();
  long id=dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive).setId(id).setReplication((short)2).build());
  dfs.removeCacheDirective(id);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that pool metadata is redacted when listed by a user without
 * read access to the pool, and fully visible once that user is made the
 * pool's owner.
 */
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser=UserGroupInformation.createRemoteUser("myuser");
  final DistributedFileSystem myDfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser,conf);
  final String poolName="poolparty";
  // 0700 pool owned by the test user: "myuser" may not read it.
  dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short)0700)));
  // NOTE(review): element type restored — listCachePools yields
  // CachePoolEntry (see .next().getInfo()); the raw type cannot compile.
  RemoteIterator<CachePoolEntry> it=myDfs.listCachePools();
  CachePoolInfo info=it.next().getInfo();
  assertFalse(it.hasNext());
  // Only the pool name is visible to a non-reader; all else is hidden.
  assertEquals("Expected pool name",poolName,info.getPoolName());
  assertNull("Unexpected owner name",info.getOwnerName());
  assertNull("Unexpected group name",info.getGroupName());
  assertNull("Unexpected mode",info.getMode());
  assertNull("Unexpected limit",info.getLimit());
  final long limit=99;
  // Make "myuser" the owner; full metadata must now be listed.
  dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit));
  it=myDfs.listCachePools();
  info=it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name",poolName,info.getPoolName());
  assertEquals("Mismatched owner name",myUser.getShortUserName(),info.getOwnerName());
  assertNotNull("Expected group name",info.getGroupName());
  assertEquals("Mismatched mode",(short)0700,info.getMode().toShort());
  assertEquals("Mismatched limit",limit,(long)info.getLimit());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests save namespace: delegation tokens issued before saveNamespace must
 * still be renewable and cancellable across repeated NameNode restarts
 * from the saved image.
 */
@Test public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs=null;
  try {
    Configuration conf=new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    FSNamesystem namesystem=cluster.getNamesystem();
    String renewer=UserGroupInformation.getLoginUser().getUserName();
    Token token1=namesystem.getDelegationToken(new Text(renewer));
    Token token2=namesystem.getDelegationToken(new Text(renewer));
    DFSAdmin admin=new DFSAdmin(conf);
    String[] args=new String[]{"-saveNamespace"};
    NameNode nn=cluster.getNameNode();
    // Before saving, every storage dir's in-progress edit log is expected
    // to hold exactly 5 transactions.
    for ( StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log=FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions=(log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should have 5 transactions",5,numTransactions);
    }
    // saveNamespace requires safe mode.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    }
    catch ( Exception e) {
      // Preserve the original cause rather than flattening it to a message.
      throw new IOException(e);
    }
    // After saving, each log should contain only the START transaction.
    for ( StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log=FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions=(log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn",1,numTransactions);
    }
    // Restart from the saved image; the old tokens must still be renewable.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    try {
      renewToken(token1);
      renewToken(token2);
    }
    catch ( IOException e) {
      fail("Could not renew or cancel the token");
    }
    namesystem=cluster.getNamesystem();
    Token token3=namesystem.getDelegationToken(new Text(renewer));
    Token token4=namesystem.getDelegationToken(new Text(renewer));
    // Second restart: tokens issued both before and after the save must work.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem=cluster.getNamesystem();
    Token token5=namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    }
    catch ( IOException e) {
      fail("Could not renew or cancel the token");
    }
    // Third restart: finally renew AND cancel every token.
    cluster.shutdown();
    cluster=null;
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem=cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    }
    catch ( IOException e) {
      fail("Could not renew or cancel the token");
    }
  }
  finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test case where two secondary namenodes are checkpointing the same
 * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()} since that test runs against two distinct NNs.
 * This case tests the following interleaving:
 * - 2NN A) calls rollEdits()
 * - 2NN B) calls rollEdits()
 * - 2NN A) paused at getRemoteEditLogManifest()
 * - 2NN B) calls getRemoteEditLogManifest() (returns up to txid 4)
 * - 2NN B) uploads checkpoint fsimage_4
 * - 2NN A) allowed to proceed, also returns up to txid 4
 * - 2NN A) uploads checkpoint fsimage_4 as well, should fail gracefully
 * It verifies that one of the two gets an error that it's uploading a
 * duplicate checkpoint, and the other one succeeds.
 */
@Test public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
SecondaryNameNode secondary1=null, secondary2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
secondary1=startSecondaryNameNode(conf,1);
secondary2=startSecondaryNameNode(conf,2);
// Wrap 2NN A's NN proxy in a spy so getEditLogManifest() can be paused
// mid-checkpoint, forcing the interleaving described in the javadoc.
final NamenodeProtocol origNN=secondary1.getNameNode();
final Answer delegator=new GenericTestUtils.DelegateAnswer(origNN);
NamenodeProtocol spyNN=Mockito.mock(NamenodeProtocol.class,delegator);
DelayAnswer delayer=new DelayAnswer(LOG){
@Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
return delegator.answer(invocation);
}
}
;
secondary1.setNameNode(spyNN);
Mockito.doAnswer(delayer).when(spyNN).getEditLogManifest(Mockito.anyLong());
// 2NN A checkpoints in a background thread and blocks inside
// getEditLogManifest() until delayer.proceed() below.
DoCheckpointThread checkpointThread=new DoCheckpointThread(secondary1);
checkpointThread.start();
delayer.waitForCall();
// 2NN B completes a full checkpoint while A is paused.
secondary2.doCheckpoint();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
assertEquals(4,storage.getMostRecentCheckpointTxId());
// Let A finish: its duplicate fsimage_4 upload must fail gracefully,
// leaving the existing checkpoint untouched.
delayer.proceed();
checkpointThread.join();
checkpointThread.propagateExceptions();
assertEquals(4,storage.getMostRecentCheckpointTxId());
// Both 2NNs must still be able to checkpoint afterwards.
secondary2.doCheckpoint();
assertEquals(6,storage.getMostRecentCheckpointTxId());
assertNNHasCheckpoints(cluster,ImmutableList.of(4,6));
// Restore the real (non-spy) proxy before A checkpoints again.
secondary1.setNameNode(origNN);
secondary1.doCheckpoint();
assertEquals(8,storage.getMostRecentCheckpointTxId());
assertParallelFilesInvariant(cluster,ImmutableList.of(secondary1,secondary2));
assertNNHasCheckpoints(cluster,ImmutableList.of(6,8));
}
finally {
cleanup(secondary1);
secondary1=null;
cleanup(secondary2);
secondary2=null;
cleanup(cluster);
cluster=null;
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Checks SecondaryNameNode command-line parsing: no arguments,
 * "-checkpoint" with and without "force", "-geteditsize", "-format",
 * and rejection of conflicting or malformed argument combinations.
 */
@Test public void testCommandLineParsing() throws ParseException {
  SecondaryNameNode.CommandLineOpts cliOpts = new SecondaryNameNode.CommandLineOpts();

  // No arguments: no command is selected.
  cliOpts.parse();
  assertNull(cliOpts.getCommand());

  // Plain checkpoint request, not forced.
  cliOpts.parse("-checkpoint");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, cliOpts.getCommand());
  assertFalse(cliOpts.shouldForceCheckpoint());

  // Forced checkpoint.
  cliOpts.parse("-checkpoint", "force");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT, cliOpts.getCommand());
  assertTrue(cliOpts.shouldForceCheckpoint());

  // Edit-size query.
  cliOpts.parse("-geteditsize");
  assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE, cliOpts.getCommand());

  // Format flag.
  cliOpts.parse("-format");
  assertTrue(cliOpts.shouldFormat());

  // Two mutually exclusive actions must be rejected.
  try {
    cliOpts.parse("-geteditsize", "-checkpoint");
    fail("Should have failed bad parsing for two actions");
  } catch (ParseException e) {
    LOG.warn("Encountered ", e);
  }

  // An unrecognized checkpoint argument must be rejected.
  try {
    cliOpts.parse("-checkpoint", "xx");
    fail("Should have failed for bad checkpoint arg");
  } catch (ParseException e) {
    LOG.warn("Encountered ", e);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that a fault injected while the 2NN merges edits does not poison
 * later checkpoints: once the fault is cleared, the next checkpoint must
 * reload the image and succeed.
 */
@Test(timeout=30000) public void testReloadOnEditReplayFailure() throws IOException {
Configuration conf=new HdfsConfiguration();
FSDataOutputStream fos=null;
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
fos=fs.create(new Path("tmpfile0"));
fos.write(new byte[]{0,1,2,3});
// Baseline checkpoint succeeds.
secondary.doCheckpoint();
// Write and sync more data so the next checkpoint has edits to merge.
fos.write(new byte[]{0,1,2,3});
fos.hsync();
// Make the merge step of the next checkpoint fail.
Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
// expected: the injected merge failure propagates as an IOException
}
Mockito.reset(faultInjector);
fos.write(new byte[]{0,1,2,3});
fos.hsync();
// Per the assertion message, a true return means the image was reloaded.
assertTrue("Another checkpoint should have reloaded image",secondary.doCheckpoint());
}
finally {
// NOTE(review): fos is never closed; appears to rely on cluster
// shutdown for cleanup — confirm intentional.
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
// Always clear the injector so later tests are unaffected.
Mockito.reset(faultInjector);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier PublicFieldVerifier HybridVerifier
/**
 * Tests checkpoint in HDFS: a 2NN checkpoints the primary, the cluster is
 * restarted from the checkpointed image twice, and namespace changes made
 * between checkpoints must survive each restart.
 */
@Test public void testCheckpoint() throws IOException {
  Path file1=new Path("checkpoint.dat");
  Path file2=new Path("checkpoint2.dat");
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
  replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  SecondaryNameNode secondary=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    // assertFalse is clearer and reports better than assertTrue(!...).
    assertFalse(fileSys.exists(file1));
    assertFalse(fileSys.exists(file2));
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file1,replication);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // The checkpoint must have bumped the NN image-transfer metrics.
    MetricsRecordBuilder rb=getMetrics(NN_METRICS);
    assertCounterGt("GetImageNumOps",0,rb);
    assertCounterGt("GetEditNumOps",0,rb);
    assertCounterGt("PutImageNumOps",0,rb);
    assertGaugeGt("GetImageAvgTime",0.0,rb);
    assertGaugeGt("GetEditAvgTime",0.0,rb);
    assertGaugeGt("PutImageAvgTime",0.0,rb);
  }
  finally {
    // Null-guard: cluster start-up may have thrown before fileSys was
    // assigned; an unguarded close() would NPE and mask the real failure.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  Path tmpDir=new Path("/tmp_tmp");
  try {
    // Restart from the checkpointed image; file1 must still exist.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    checkFile(fileSys,file1,replication);
    cleanupFile(fileSys,file1);
    DFSTestUtil.createFile(fileSys,file2,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file2,replication);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    // The 2NN's root dir must be the same object in the inode map and in
    // the directory tree.
    FSDirectory secondaryFsDir=secondary.getFSNamesystem().dir;
    INode rootInMap=secondaryFsDir.getInode(secondaryFsDir.rootDir.getId());
    assertSame(rootInMap,secondaryFsDir.rootDir);
    fileSys.delete(tmpDir,true);
    fileSys.mkdirs(tmpDir);
    secondary.doCheckpoint();
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  // Final restart: file1 was removed, file2 and tmpDir must have survived.
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
  cluster.waitActive();
  fileSys=cluster.getFileSystem();
  assertFalse(fileSys.exists(file1));
  assertTrue(fileSys.exists(tmpDir));
  try {
    checkFile(fileSys,file2,replication);
  }
  finally {
    fileSys.close();
    cluster.shutdown();
    cluster=null;
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test case where the NN is configured with a name-only and an edits-only
 * dir, with storage-restore turned on. In this case, if the name-only dir
 * disappears and comes back, a new checkpoint after it has been restored
 * should function correctly.
 * @throws Exception
 */
@Test public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
File currentDir=null;
Configuration conf=new HdfsConfiguration();
File base_dir=new File(MiniDFSCluster.getBaseDirectory());
// Separate image and edits locations, with automatic restore enabled.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,true);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/name-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,MiniDFSCluster.getBaseDirectory() + "/edits-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,fileAsURI(new File(base_dir,"namesecondary1")).toString());
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).manageNameDfsDirs(false).build();
secondary=startSecondaryNameNode(conf);
// Baseline: one successful checkpoint while all dirs are healthy.
secondary.doCheckpoint();
NamenodeProtocols nn=cluster.getNameNodeRpc();
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
// The assertion below checks that storage dir 0 is the image-only dir.
StorageDirectory sd0=storage.getStorageDir(0);
assertEquals(NameNodeDirType.IMAGE,sd0.getStorageDirType());
currentDir=sd0.getCurrentDir();
// Simulate the name dir failing by stripping all permissions.
assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"000"));
try {
secondary.doCheckpoint();
fail("Did not fail to checkpoint when there are no valid storage dirs");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("No targets in destination storage",ioe);
}
// Restore permissions, ask the NN to restore failed storage, and roll
// the edit log before checkpointing again.
assertEquals(0,FileUtil.chmod(currentDir.getAbsolutePath(),"755"));
nn.restoreFailedStorage("true");
nn.rollEditLog();
secondary.doCheckpoint();
// NOTE(review): the expected checkpoint txid (8) reflects this test's
// exact sequence of operations — confirm if the sequence changes.
assertNNHasCheckpoints(cluster,ImmutableList.of(8));
assertParallelFilesInvariant(cluster,ImmutableList.of(secondary));
}
finally {
// Re-grant permissions even on failure so cleanup can delete the dir.
if (currentDir != null) {
FileUtil.chmod(currentDir.getAbsolutePath(),"755");
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
InternalCallVerifier NullVerifier
/**
 * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
 * checkpoint if security is enabled and the NN restarts without outstanding
 * delegation tokens"
 */
@Test public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
  MiniDFSCluster dfsCluster = null;
  SecondaryNameNode checkpointer = null;
  Configuration config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  try {
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).format(true).build();
    // Issue a delegation token so the edit log contains token operations.
    assertNotNull(dfsCluster.getNamesystem().getDelegationToken(new Text("atm")));
    checkpointer = startSecondaryNameNode(config);
    checkpointer.doCheckpoint();
    // Save the namespace (token ops included), then checkpoint once more:
    // the 2NN must handle the freshly saved image.
    dfsCluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    dfsCluster.getNameNodeRpc().saveNamespace();
    dfsCluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    checkpointer.doCheckpoint();
  } finally {
    cleanup(checkpointer);
    checkpointer = null;
    cleanup(dfsCluster);
    dfsCluster = null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Simulates a 2NN failure right after the edit-log roll: the checkpoint
 * attempt must fail, the namespace must stay writable, and a restart plus
 * fresh checkpoint must succeed.
 */
@Test public void testSecondaryNamenodeError1() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError1");
  Configuration conf=new HdfsConfiguration();
  Path file1=new Path("checkpointxx.dat");
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  SecondaryNameNode secondary=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    secondary=startSecondaryNameNode(conf);
    // Make the next checkpoint fail right after the edit log is rolled.
    Mockito.doThrow(new IOException("Injecting failure after rolling edit logs")).when(faultInjector).afterSecondaryCallsRollEditLog();
    try {
      secondary.doCheckpoint();
      fail("Checkpoint should have failed after injected fault"); // was assertTrue(false)
    }
    catch ( IOException e) {
      // expected: the injected fault propagates out of doCheckpoint()
    }
    Mockito.reset(faultInjector);
    // The namespace must still be writable after the failed checkpoint.
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file1,replication);
  }
  finally {
    // Null-guard: cluster start-up may have thrown before fileSys was set.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  try {
    // Restart from disk and verify a checkpoint now succeeds.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    checkFile(fileSys,file1,replication);
    cleanupFile(fileSys,file1);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the secondary doesn't have to re-download the image if it
 * hasn't changed: the first checkpoint downloads and re-saves the image,
 * while a second checkpoint with new edits must not re-download it.
 */
@Test public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0");
  Path dir=new Path("/checkpoint");
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
  cluster.waitActive();
  FileSystem fileSys=cluster.getFileSystem();
  FSImage image=cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary=null;
  try {
    // assertFalse is clearer and reports better than assertTrue(!...).
    assertFalse(fileSys.exists(dir));
    secondary=startSecondaryNameNode(conf);
    File secondaryDir=new File(MiniDFSCluster.getBaseDirectory(),"namesecondary1");
    File secondaryCurrent=new File(secondaryDir,"current");
    long expectedTxIdToDownload=cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId();
    File secondaryFsImageBefore=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload));
    // NOTE(review): the +2 / +5 offsets encode how many transactions each
    // checkpoint/mkdirs adds in this exact sequence — confirm if changed.
    File secondaryFsImageAfter=new File(secondaryCurrent,NNStorage.getImageFileName(expectedTxIdToDownload + 2));
    assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists",secondaryFsImageBefore.exists());
    // First checkpoint downloads the original image and writes a new one.
    assertTrue("Secondary should have loaded an image",secondary.doCheckpoint());
    assertTrue("Secondary should have downloaded original image",secondaryFsImageBefore.exists());
    assertTrue("Secondary should have created a new image",secondaryFsImageAfter.exists());
    long fsimageLength=secondaryFsImageBefore.length();
    assertEquals("Image size should not have changed",fsimageLength,secondaryFsImageAfter.length());
    // Add a namespace change so the next checkpoint has edits to merge;
    // the image itself must not be re-downloaded.
    fileSys.mkdirs(dir);
    assertFalse("Another checkpoint should not have to re-load image",secondary.doCheckpoint());
    for ( StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile=NNStorage.getImageFile(sd,NameNodeFile.IMAGE,expectedTxIdToDownload + 5);
      assertTrue("Image size increased",imageFile.length() > fsimageLength);
    }
  }
  finally {
    fileSys.close();
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Simulates a 2NN failure right after the new image upload: the checkpoint
 * attempt must fail, the namespace must stay writable, and a restart plus
 * fresh checkpoint must succeed.
 */
@Test public void testSecondaryNamenodeError2() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError2");
  Configuration conf=new HdfsConfiguration();
  Path file1=new Path("checkpointyy.dat");
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  SecondaryNameNode secondary=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    secondary=startSecondaryNameNode(conf);
    // Make the next checkpoint fail right after the new image is uploaded.
    Mockito.doThrow(new IOException("Injecting failure after uploading new image")).when(faultInjector).afterSecondaryUploadsNewImage();
    try {
      secondary.doCheckpoint();
      fail("Checkpoint should have failed after injected fault"); // was assertTrue(false)
    }
    catch ( IOException e) {
      // expected: the injected fault propagates out of doCheckpoint()
    }
    Mockito.reset(faultInjector);
    // The namespace must still be writable after the failed checkpoint.
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file1,replication);
  }
  finally {
    // Null-guard: cluster start-up may have thrown before fileSys was set.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  try {
    // Restart from disk and verify a checkpoint now succeeds.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    checkFile(fileSys,file1,replication);
    cleanupFile(fileSys,file1);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Simulates a 2NN failure after the edit-log roll, then restarts the 2NN
 * itself: the replacement 2NN must checkpoint successfully, and a full
 * cluster restart afterwards must also checkpoint cleanly.
 */
@Test public void testSecondaryNamenodeError3() throws IOException {
  LOG.info("Starting testSecondaryNamenodeError3");
  Configuration conf=new HdfsConfiguration();
  Path file1=new Path("checkpointzz.dat");
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  SecondaryNameNode secondary=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    assertFalse(fileSys.exists(file1));
    secondary=startSecondaryNameNode(conf);
    // Make the next checkpoint fail right after the edit log is rolled.
    Mockito.doThrow(new IOException("Injecting failure after rolling edit logs")).when(faultInjector).afterSecondaryCallsRollEditLog();
    try {
      secondary.doCheckpoint();
      fail("Checkpoint should have failed after injected fault"); // was assertTrue(false)
    }
    catch ( IOException e) {
      // expected: the injected fault propagates out of doCheckpoint()
    }
    Mockito.reset(faultInjector);
    // Replace the 2NN entirely; the new instance must checkpoint fine.
    secondary.shutdown();
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,replication,seed);
    checkFile(fileSys,file1,replication);
  }
  finally {
    // Null-guard: cluster start-up may have thrown before fileSys was set.
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
  try {
    // Restart from disk and verify a checkpoint still succeeds.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    checkFile(fileSys,file1,replication);
    cleanupFile(fileSys,file1);
    secondary=startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    secondary.shutdown();
  }
  finally {
    if (fileSys != null) {
      fileSys.close();
    }
    cleanup(secondary);
    secondary=null;
    cleanup(cluster);
    cluster=null;
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that, if the edits dir is separate from the name dir, it is
 * properly locked.
 */
@Test public void testSeparateEditsDirLocking() throws IOException {
  Configuration config = new HdfsConfiguration();
  File namePath = new File(MiniDFSCluster.getBaseDirectory(), "name");
  File editsPath = new File(MiniDFSCluster.getBaseDirectory(), "testSeparateEditsDirLocking");
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namePath.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsPath.getAbsolutePath());
  MiniDFSCluster miniCluster = null;
  StorageDirectory lockedSd = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).numDataNodes(0).build();
    NNStorage nnStorage = miniCluster.getNameNode().getFSImage().getStorage();
    // Every edits-type storage dir must point at the separate edits
    // location and must already be held locked by the running NN.
    for (StorageDirectory sd : nnStorage.dirIterable(NameNodeDirType.EDITS)) {
      assertEquals(editsPath.getAbsoluteFile(), sd.getRoot());
      assertLockFails(sd);
      lockedSd = sd;
    }
  } finally {
    cleanup(miniCluster);
    miniCluster = null;
  }
  assertNotNull(lockedSd);
  // A second cluster must refuse to start while that dir stays locked.
  assertClusterStartFailsWhenDirLocked(config, lockedSd);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * @throws IOException
 */
@Test public void testMultipleSecondaryNamenodes() throws IOException {
  Configuration config = new HdfsConfiguration();
  String nsId1 = "ns1";
  String nsId2 = "ns2";
  config.set(DFSConfigKeys.DFS_NAMESERVICES, nsId1 + "," + nsId2);
  MiniDFSCluster miniCluster = null;
  SecondaryNameNode first2nn = null;
  SecondaryNameNode second2nn = null;
  try {
    miniCluster = new MiniDFSCluster.Builder(config).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(config.get(DFSConfigKeys.DFS_NAMESERVICES))).build();
    Configuration snConf1 = new HdfsConfiguration(miniCluster.getConfiguration(0));
    Configuration snConf2 = new HdfsConfiguration(miniCluster.getConfiguration(1));
    InetSocketAddress rpcAddr1 = miniCluster.getNameNode(0).getNameNodeAddress();
    InetSocketAddress rpcAddr2 = miniCluster.getNameNode(1).getNameNodeAddress();
    String nnHostPort1 = rpcAddr1.getHostName() + ":" + rpcAddr1.getPort();
    String nnHostPort2 = rpcAddr2.getHostName() + ":" + rpcAddr2.getPort();
    // Blank out the generic service-rpc key and set only the
    // nameservice-suffixed variant for each 2NN's configuration.
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId1), nnHostPort1);
    snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId2), nnHostPort2);
    first2nn = startSecondaryNameNode(snConf1);
    second2nn = startSecondaryNameNode(snConf2);
    // Each 2NN must be bound to its own NN's RPC port.
    assertEquals(first2nn.getNameNodeAddress().getPort(), rpcAddr1.getPort());
    assertEquals(second2nn.getNameNodeAddress().getPort(), rpcAddr2.getPort());
    assertTrue(first2nn.getNameNodeAddress().getPort() != second2nn.getNameNodeAddress().getPort());
    // Both must be able to checkpoint their respective namenodes.
    first2nn.doCheckpoint();
    second2nn.doCheckpoint();
  } finally {
    cleanup(first2nn);
    first2nn = null;
    cleanup(second2nn);
    second2nn = null;
    cleanup(miniCluster);
    miniCluster = null;
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory).
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  // NOTE(review): generic types restored (NNStorage takes URI collections;
  // the raw List below cannot compile against getRoot()).
  ArrayList<java.net.URI> fsImageDirs=new ArrayList<java.net.URI>();
  ArrayList<java.net.URI> editsDirs=new ArrayList<java.net.URI>();
  File filePath=new File(PathUtils.getTestDir(getClass()),"storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage=new NNStorage(new HdfsConfiguration(),fsImageDirs,editsDirs);
  try {
    // contains() reads better than indexOf(...) != -1.
    assertTrue("List of storage directories didn't have storageDirToCheck.",nnStorage.getEditsDirectories().iterator().next().toString().contains("storageDirToCheck"));
    assertTrue("List of removed storage directories wasn't empty",nnStorage.getRemovedStorageDirs().isEmpty());
  }
  finally {
    // Delete the backing dir so the subsequent write hits an IOException.
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),filePath.delete());
  }
  nnStorage.writeTransactionIdFileToStorage(1);
  // The failed dir must have been moved onto the removed-storage list.
  List<StorageDirectory> listRsd=nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().toString().contains("storageDirToCheck"));
  nnStorage.close();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that the 2NN terminates (via an ExitException from ExitUtil)
 * once consecutive merge failures exceed the configured retry limit.
 */
@Test(timeout=30000) public void testTooManyEditReplayFailures() throws IOException {
Configuration conf=new HdfsConfiguration();
// Allow a single retry and poll for checkpoint work every second, so the
// retry budget is exhausted quickly.
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,"1");
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,"1");
FSDataOutputStream fos=null;
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// checkExitOnShutdown(false) lets the forced exit surface here as an
// ExitException instead of terminating the test JVM.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).checkExitOnShutdown(false).build();
cluster.waitActive();
fs=cluster.getFileSystem();
fos=fs.create(new Path("tmpfile0"));
fos.write(new byte[]{0,1,2,3});
// Every merge attempt fails until the injector is reset in finally.
Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge();
secondary=startSecondaryNameNode(conf);
// doWork() loops; the retry limit must eventually force an exit.
secondary.doWork();
fail("2NN did not exit.");
}
catch ( ExitException ee) {
// expected: retry limit hit and the 2NN exited via ExitUtil
ExitUtil.resetFirstExitException();
assertEquals("Max retries",1,secondary.getMergeErrorCount() - 1);
}
finally {
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
// Always clear the injector so later tests are unaffected.
Mockito.reset(faultInjector);
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the SecondaryNameNode properly locks its storage directories.
 */
@Test public void testSecondaryNameNodeLocking() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
StorageDirectory savedSd=null;
secondary=startSecondaryNameNode(conf);
NNStorage storage=secondary.getFSImage().getStorage();
// While the 2NN runs, each of its storage dirs must already be locked.
// NOTE(review): assumes at least one storage dir exists; savedSd would
// stay null (and NPE below) otherwise — confirm.
for ( StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd=sd;
}
LOG.info("===> Shutting down first 2NN");
secondary.shutdown();
secondary=null;
LOG.info("===> Locking a dir, starting second 2NN");
LOG.info("Trying to lock" + savedSd);
// Hold the lock ourselves; a second 2NN must now fail to start...
savedSd.lock();
try {
secondary=startSecondaryNameNode(conf);
// ...unless file locking is unsupported on this platform, in which
// case start-up succeeding is acceptable.
assertFalse("Should fail to start 2NN when " + savedSd + " is locked",savedSd.isLockSupported());
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("already locked",ioe);
}
finally {
savedSd.unlock();
}
}
finally {
cleanup(secondary);
secondary=null;
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that a fault while downloading edits does not prevent future
 * checkpointing: a failed checkpoint leaves a single .tmp edits file behind,
 * and even after that file is truncated the next checkpoint must succeed.
 */
@Test(timeout=30000) public void testEditFailureBeforeRename() throws IOException {
Configuration conf=new HdfsConfiguration();
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0l);
secondary.doCheckpoint();
// Inject a failure between downloading the edits and renaming the tmp
// file into place, so the next checkpoint leaves a tmp file behind.
Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
}
Mockito.reset(faultInjector);
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Expected a single tmp edits file in directory " + sd.toString(),tmpEdits.length == 1);
// Truncate the leftover tmp file. Close the handle even if
// setLength() throws (the original leaked the RandomAccessFile
// on failure).
RandomAccessFile randFile=new RandomAccessFile(tmpEdits[0],"rw");
try {
randFile.setLength(0);
}
finally {
randFile.close();
}
}
// The next checkpoint must succeed despite the truncated tmp file.
secondary.doCheckpoint();
}
finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
Mockito.reset(faultInjector);
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test case where two secondary namenodes are checkpointing the same
 * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()} since
 * that test runs against two distinct NNs.
 * This case tests the following interleaving:
 * - 2NN A downloads image (up to txid 2)
 * - 2NN A about to save its own checkpoint
 * - 2NN B downloads image (up to txid 4)
 * - 2NN B uploads checkpoint (txid 4)
 * - 2NN A uploads checkpoint (txid 2)
 * It verifies that this works even though the earlier-txid checkpoint gets
 * uploaded after the later-txid checkpoint.
 */
@Test public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
SecondaryNameNode secondary1=null, secondary2=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
secondary1=startSecondaryNameNode(conf,1);
secondary2=startSecondaryNameNode(conf,2);
// Pause 2NN A inside saveFSImageInAllDirs so 2NN B can overtake it.
CheckpointStorage spyImage1=spyOnSecondaryImage(secondary1);
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(spyImage1).saveFSImageInAllDirs(Mockito.any(),Mockito.anyLong());
DoCheckpointThread checkpointThread=new DoCheckpointThread(secondary1);
checkpointThread.start();
// Wait until A is blocked mid-checkpoint, let B run a complete
// checkpoint, then release A and wait for it to finish.
delayer.waitForCall();
secondary2.doCheckpoint();
delayer.proceed();
checkpointThread.join();
checkpointThread.propagateExceptions();
// B's later checkpoint (txid 4) must stay the most recent even though
// A's earlier one (txid 2) was uploaded after it.
NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
assertEquals(4,storage.getMostRecentCheckpointTxId());
assertNNHasCheckpoints(cluster,ImmutableList.of(2,4));
// A subsequent checkpoint from B must proceed normally.
secondary2.doCheckpoint();
assertEquals(6,storage.getMostRecentCheckpointTxId());
assertParallelFilesInvariant(cluster,ImmutableList.of(secondary1,secondary2));
assertNNHasCheckpoints(cluster,ImmutableList.of(4,6));
}
finally {
cleanup(secondary1);
secondary1=null;
cleanup(secondary2);
secondary2=null;
if (cluster != null) {
cluster.shutdown();
cluster=null;
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests "-saveNamespace": verifies it is rejected outside safe mode, that
 * saving finalizes the current edit segment and writes a new image, and
 * that files and symlinks survive a cluster restart from the saved image.
 */
@Test public void testSaveNamespace() throws IOException {
MiniDFSCluster cluster=null;
DistributedFileSystem fs=null;
FileContext fc;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
cluster.waitActive();
fs=(cluster.getFileSystem());
fc=FileContext.getFileContext(cluster.getURI(0));
DFSAdmin admin=new DFSAdmin(conf);
String[] args=new String[]{"-saveNamespace"};
// saveNamespace is only legal in safe mode; expect a complaint here.
try {
admin.run(args);
}
catch ( IOException eIO) {
assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
}
catch ( Exception e) {
throw new IOException(e);
}
Path file=new Path("namespace.dat");
DFSTestUtil.createFile(fs,file,fileSize,fileSize,blockSize,replication,seed);
checkFile(fs,file,replication);
Path symlink=new Path("file.link");
fc.createSymlink(file,symlink,false);
assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
// Typed collection: iterating a raw Collection with a URI loop variable
// does not compile.
Collection<URI> editsDirs=cluster.getNameEditsDirs(0);
for ( URI uri : editsDirs) {
File ed=new File(uri.getPath());
assertTrue(new File(ed,"current/" + NNStorage.getInProgressEditsFileName(1)).length() > Integer.SIZE / Byte.SIZE);
}
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
admin.run(args);
}
catch ( Exception e) {
throw new IOException(e);
}
final int EXPECTED_TXNS_FIRST_SEG=13;
// Saving rolled the log: segment 1..13 is finalized and a fresh
// in-progress segment starts right after it.
for ( URI uri : editsDirs) {
File ed=new File(uri.getPath());
File curDir=new File(ed,"current");
LOG.info("Files in " + curDir + ":\n "+ Joiner.on("\n ").join(curDir.list()));
File originalEdits=new File(curDir,NNStorage.getInProgressEditsFileName(1));
assertFalse(originalEdits.exists());
File finalizedEdits=new File(curDir,NNStorage.getFinalizedEditsFileName(1,EXPECTED_TXNS_FIRST_SEG));
GenericTestUtils.assertExists(finalizedEdits);
assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
GenericTestUtils.assertExists(new File(ed,"current/" + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
}
Collection<URI> imageDirs=cluster.getNameDirs(0);
for ( URI uri : imageDirs) {
File imageDir=new File(uri.getPath());
File savedImage=new File(imageDir,"current/" + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
assertTrue("Should have saved image at " + savedImage,savedImage.exists());
}
// Restart from the saved image (format=false) and verify the namespace.
cluster.shutdown();
cluster=null;
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
fs=(cluster.getFileSystem());
checkFile(fs,file,replication);
fc=FileContext.getFileContext(cluster.getURI(0));
assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
}
finally {
if (fs != null) fs.close();
cleanup(cluster);
cluster=null;
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-3849: re-loading the FSImage in the 2NN must
 * clear any leases left over from the previously loaded namespace.
 */
@Test public void testSecondaryNameNodeWithSavedLeases() throws IOException {
MiniDFSCluster dfsCluster=null;
SecondaryNameNode checkpointer=null;
FSDataOutputStream out=null;
Configuration conf=new HdfsConfiguration();
try {
dfsCluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
FileSystem fileSystem=dfsCluster.getFileSystem();
// Open a file and hflush without closing, so the NN holds one lease.
out=fileSystem.create(new Path("tmpfile"));
out.write(new byte[]{0,1,2,3});
out.hflush();
assertEquals(1,dfsCluster.getNamesystem().getLeaseManager().countLease());
// A fresh 2NN has no leases; after a checkpoint it picks up the one
// open-file lease from the downloaded namespace.
checkpointer=startSecondaryNameNode(conf);
assertEquals(0,checkpointer.getFSNamesystem().getLeaseManager().countLease());
checkpointer.doCheckpoint();
assertEquals(1,checkpointer.getFSNamesystem().getLeaseManager().countLease());
out.close();
out=null;
// Save a new image on the NN with the file now closed, then checkpoint
// again: the re-loaded image must not retain the stale lease.
dfsCluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
dfsCluster.getNameNodeRpc().saveNamespace();
dfsCluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
checkpointer.doCheckpoint();
assertEquals(0,checkpointer.getFSNamesystem().getLeaseManager().countLease());
}
finally {
if (out != null) {
out.close();
}
cleanup(checkpointer);
checkpointer=null;
cleanup(dfsCluster);
dfsCluster=null;
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test that the secondary namenode correctly deletes temporary edits
 * on startup: a failed checkpoint leaves a .tmp edits file behind, and a
 * 2NN restart must remove it and then checkpoint cleanly.
 */
@Test(timeout=60000) public void testDeleteTemporaryEditsOnStartup() throws IOException {
Configuration conf=new HdfsConfiguration();
SecondaryNameNode secondary=null;
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fs=cluster.getFileSystem();
secondary=startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs,new Path("tmpfile0"),1024,(short)1,0l);
secondary.doCheckpoint();
// Fail the next checkpoint between downloading edits and renaming the
// tmp file, leaving a stray tmp edits file in each edits dir.
Mockito.doThrow(new IOException("Injecting failure before edit rename")).when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs,new Path("tmpfile1"),1024,(short)1,0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Injecting failure before edit rename",ioe);
}
Mockito.reset(faultInjector);
// Confirm exactly one leftover tmp edits file per edits directory.
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Expected a single tmp edits file in directory " + sd.toString(),tmpEdits.length == 1);
}
// Restarting the 2NN must clean up the tmp files...
secondary.shutdown();
secondary=startSecondaryNameNode(conf);
for ( StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits=sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue("Did not expect a tmp edits file in directory " + sd.toString(),tmpEdits.length == 0);
}
// ...and a subsequent checkpoint must succeed.
secondary.doCheckpoint();
}
finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
Mockito.reset(faultInjector);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test to ensure namenode rejects request from dead datanode
 * - Start a cluster
 * - Shutdown the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to Namenode and make sure it is rejected
 * appropriately.
 */
@Test public void testDeadDatanode() throws Exception {
Configuration conf=new HdfsConfiguration();
// Short heartbeat/recheck intervals so the DN is declared dead quickly.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,500);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1L);
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
String poolId=cluster.getNamesystem().getBlockPoolId();
DataNode dn=cluster.getDataNodes().get(0);
DatanodeRegistration reg=DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0),poolId);
// Wait for the DN to be live, kill it, then wait until it is marked dead.
waitForDatanodeState(reg.getDatanodeUuid(),true,20000);
dn.shutdown();
waitForDatanodeState(reg.getDatanodeUuid(),false,20000);
DatanodeProtocol dnp=cluster.getNameNodeRpc();
ReceivedDeletedBlockInfo[] blocks={new ReceivedDeletedBlockInfo(new Block(0),ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,null)};
StorageReceivedDeletedBlocks[] storageBlocks={new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(),blocks)};
// blockReceivedAndDeleted from a dead node must be rejected.
try {
dnp.blockReceivedAndDeleted(reg,poolId,storageBlocks);
fail("Expected IOException is not thrown");
}
catch ( IOException ex) {
// expected: NN refuses this RPC from a dead datanode
}
StorageBlockReport[] report={new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()),new long[]{0L,0L,0L})};
// A block report from a dead node must likewise be rejected.
try {
dnp.blockReport(reg,poolId,report);
fail("Expected IOException is not thrown");
}
catch ( IOException ex) {
// expected
}
// A heartbeat from a dead node is answered with a single command telling
// the DN to re-register.
StorageReport[] rep={new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()),false,0,0,0,0)};
DatanodeCommand[] cmd=dnp.sendHeartbeat(reg,rep,0L,0L,0,0,0).getCommands();
assertEquals(1,cmd.length);
// expected first, actual second (the original had the arguments reversed)
assertEquals(RegisterCommand.REGISTER.getAction(),cmd[0].getAction());
}
IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests Decommissioning Status in DFS: decommission the two datanodes one
 * at a time and verify the DatanodeManager's view of the in-progress nodes
 * and the DFSAdmin report after each step.
 */
@Test public void testDecommissionStatus() throws IOException, InterruptedException {
InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
DFSClient client=new DFSClient(addr,conf);
DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ",2,info.length);
DistributedFileSystem fileSys=cluster.getFileSystem();
DFSAdmin admin=new DFSAdmin(cluster.getConfiguration(0));
short replicas=2;
// One fully-written file, plus one left open so replication work remains
// pending while nodes decommission.
Path file1=new Path("decommission.dat");
DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,replicas,seed);
Path file2=new Path("decommission1.dat");
FSDataOutputStream st1=writeIncompleteFile(fileSys,file2,replicas);
Thread.sleep(5000);
FSNamesystem fsn=cluster.getNamesystem();
final DatanodeManager dm=fsn.getBlockManager().getDatanodeManager();
for (int iteration=0; iteration < numDatanodes; iteration++) {
String downnode=decommissionNode(fsn,client,localFileSys,iteration);
dm.refreshNodes(conf);
decommissionedNodes.add(downnode);
// Give the decommission monitor time to pick up the change.
Thread.sleep(5000);
// Typed list: a raw List would not compile against get()'s use below.
final List<DatanodeDescriptor> decommissioningNodes=dm.getDecommissioningNodes();
if (iteration == 0) {
// expected first, actual second (arguments were reversed originally)
assertEquals(1,decommissioningNodes.size());
DatanodeDescriptor decommNode=decommissioningNodes.get(0);
checkDecommissionStatus(decommNode,4,0,2);
checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0,1),fileSys,admin);
}
else {
assertEquals(2,decommissioningNodes.size());
DatanodeDescriptor decommNode1=decommissioningNodes.get(0);
DatanodeDescriptor decommNode2=decommissioningNodes.get(1);
checkDecommissionStatus(decommNode1,4,4,2);
checkDecommissionStatus(decommNode2,4,4,2);
checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0,2),fileSys,admin);
}
}
// Restore the exclude file and clean up the test files.
writeConfigFile(localFileSys,excludeFile,null);
dm.refreshNodes(conf);
st1.close();
cleanupFile(fileSys,file1);
cleanupFile(fileSys,file2);
}
InternalCallVerifier EqualityVerifier
/**
 * Ids handed out by DeduplicationMap are sequential for new strings and
 * stable on repeated lookups of the same string.
 */
@Test public void testDeduplicationMap(){
DeduplicationMap m=DeduplicationMap.newMap();
// First pass assigns ids 1..3 in insertion order; the second pass must
// return the very same ids for the same strings.
for (int pass=0; pass < 2; pass++) {
for (int i=1; i <= 3; i++) {
Assert.assertEquals(i,m.getId(Integer.toString(i)));
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the quota can be correctly updated for append: after each of
 * three appends of different sizes, the directory's consumed namespace
 * must stay constant and its consumed diskspace must track file length
 * times replication, matching getContentSummary().
 */
@Test(timeout=60000) public void testUpdateQuotaForAppend() throws Exception {
final Path foo=new Path(dir,"foo");
final Path bar=new Path(foo,"bar");
long currentFileLen=BLOCKSIZE;
DFSTestUtil.createFile(dfs,bar,currentFileLen,REPLICATION,seed);
dfs.setQuota(foo,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
// Append half a block.
DFSTestUtil.appendFile(dfs,bar,BLOCKSIZE / 2);
currentFileLen+=(BLOCKSIZE / 2);
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
assertTrue(fooNode.isQuotaSet());
Quota.Counts quota=fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
long ns=quota.get(Quota.NAMESPACE);
long ds=quota.get(Quota.DISKSPACE);
// namespace usage: the directory plus the file
assertEquals(2,ns);
assertEquals(currentFileLen * REPLICATION,ds);
ContentSummary c=dfs.getContentSummary(foo);
// expected first, actual second (arguments were reversed originally)
assertEquals(ds,c.getSpaceConsumed());
// Append a whole block.
DFSTestUtil.appendFile(dfs,bar,BLOCKSIZE);
currentFileLen+=BLOCKSIZE;
quota=fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns=quota.get(Quota.NAMESPACE);
ds=quota.get(Quota.DISKSPACE);
assertEquals(2,ns);
assertEquals(currentFileLen * REPLICATION,ds);
c=dfs.getContentSummary(foo);
assertEquals(ds,c.getSpaceConsumed());
// Append several blocks plus a fraction of a block.
DFSTestUtil.appendFile(dfs,bar,BLOCKSIZE * 3 + BLOCKSIZE / 8);
currentFileLen+=(BLOCKSIZE * 3 + BLOCKSIZE / 8);
quota=fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
ns=quota.get(Quota.NAMESPACE);
ds=quota.get(Quota.DISKSPACE);
assertEquals(2,ns);
assertEquals(currentFileLen * REPLICATION,ds);
c=dfs.getContentSummary(foo);
assertEquals(ds,c.getSpaceConsumed());
}
InternalCallVerifier EqualityVerifier
/**
 * Test if the quota can be correctly updated when file length is updated
 * through fsync: an hsync with UPDATE_LENGTH mid-append reserves a full
 * block of diskspace, which settles to the real length on close.
 */
@Test(timeout=60000) public void testUpdateQuotaForFSync() throws Exception {
final Path dirPath=new Path("/foo");
final Path filePath=new Path(dirPath,"bar");
DFSTestUtil.createFile(dfs,filePath,BLOCKSIZE,REPLICATION,0L);
dfs.setQuota(dirPath,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
// Append a quarter block and force the NN to learn the new length.
FSDataOutputStream stream=dfs.append(filePath);
stream.write(new byte[BLOCKSIZE / 4]);
((DFSOutputStream)stream.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
INodeDirectory dirNode=fsdir.getINode4Write(dirPath.toString()).asDirectory();
Quota.Counts consumed=dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
long namespaceCount=consumed.get(Quota.NAMESPACE);
long diskspaceCount=consumed.get(Quota.DISKSPACE);
assertEquals(2,namespaceCount);
// The in-flight block is charged at full block size while being written.
assertEquals(BLOCKSIZE * 2 * REPLICATION,diskspaceCount);
// Finish the append: total file length is now 1.5 blocks.
stream.write(new byte[BLOCKSIZE / 4]);
stream.close();
dirNode=fsdir.getINode4Write(dirPath.toString()).asDirectory();
consumed=dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
namespaceCount=consumed.get(Quota.NAMESPACE);
diskspaceCount=consumed.get(Quota.DISKSPACE);
assertEquals(2,namespaceCount);
assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION,diskspaceCount);
// One more full-block append: length grows to 2.5 blocks.
DFSTestUtil.appendFile(dfs,filePath,BLOCKSIZE);
consumed=dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
namespaceCount=consumed.get(Quota.NAMESPACE);
diskspaceCount=consumed.get(Quota.DISKSPACE);
assertEquals(2,namespaceCount);
assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION,diskspaceCount);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the quota can be correctly updated for create file: writing a
 * multi-block file under a quota'd directory must consume one namespace
 * entry for the file and fileLen * replication bytes of diskspace.
 */
@Test(timeout=60000) public void testQuotaUpdateWithFileCreate() throws Exception {
final Path quotaDir=new Path(dir,"foo");
Path newFile=new Path(quotaDir,"created_file.data");
dfs.mkdirs(quotaDir);
dfs.setQuota(quotaDir,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
long fileLen=BLOCKSIZE * 2 + BLOCKSIZE / 2;
DFSTestUtil.createFile(dfs,newFile,BLOCKSIZE / 16,fileLen,BLOCKSIZE,REPLICATION,seed);
INode dirInode=fsdir.getINode4Write(quotaDir.toString());
assertTrue(dirInode.isDirectory());
assertTrue(dirInode.isQuotaSet());
Quota.Counts usage=dirInode.asDirectory().getDirectoryWithQuotaFeature().getSpaceConsumed();
// namespace usage: the directory itself plus the new file
assertEquals(2,usage.get(Quota.NAMESPACE));
assertEquals(fileLen * REPLICATION,usage.get(Quota.DISKSPACE));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test loading an editlog which has had both its storage fail
 * on alternating rolls. Two edit log directories are created.
 * The first one fails on odd rolls, the second on even. Test
 * that we are able to load the entire editlog regardless.
 */
@Test public void testAlternatingJournalFailure() throws IOException {
File f1=new File(TEST_DIR + "/alternatingjournaltest0");
File f2=new File(TEST_DIR + "/alternatingjournaltest1");
// Typed list: the raw type loses the URI element type used below.
List<URI> editUris=ImmutableList.of(f1.toURI(),f2.toURI());
// Abort journal 0 on odd rolls and journal 1 on even rolls, over 10 rolls.
NNStorage storage=setupEdits(editUris,10,new AbortSpec(1,0),new AbortSpec(2,1),new AbortSpec(3,0),new AbortSpec(4,1),new AbortSpec(5,0),new AbortSpec(6,1),new AbortSpec(7,0),new AbortSpec(8,1),new AbortSpec(9,0),new AbortSpec(10,1));
long totaltxnread=0;
FSEditLog editlog=getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId=1;
// Typed iterable: iterating a raw Iterable with an EditLogInputStream
// loop variable does not compile.
Iterable<EditLogInputStream> editStreams=editlog.selectInputStreams(startTxId,TXNS_PER_ROLL * 11);
for ( EditLogInputStream edits : editStreams) {
FSEditLogLoader.EditLogValidation val=FSEditLogLoader.validateEditLog(edits);
long read=(val.getEndTxId() - edits.getFirstTxId()) + 1;
LOG.info("Loading edits " + edits + " read "+ read);
// Streams must be contiguous: each starts where the previous ended.
assertEquals(startTxId,edits.getFirstTxId());
startTxId+=read;
totaltxnread+=read;
}
editlog.close();
storage.close();
// Every transaction (initial segment plus 10 rolls) must be readable.
assertEquals(TXNS_PER_ROLL * 11,totaltxnread);
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that edit-log syncs are batched: when two threads each log an
 * edit and one of them syncs, the sync covers both pending transactions
 * and the other thread's later logSync becomes a no-op.
 */
@Test public void testSyncBatching() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Dedicated executors so each edit/sync runs on a distinct, stable thread.
ExecutorService threadA=Executors.newSingleThreadExecutor();
ExecutorService threadB=Executors.newSingleThreadExecutor();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
final FSEditLog editLog=fsimage.getEditLog();
// txid 1 is the BEGIN_LOG_SEGMENT transaction written at startup.
assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",1,editLog.getSyncTxId());
// Two unsynced edits (txids 2 and 3), one from each thread.
doLogEdit(threadA,editLog,"thread-a 1");
assertEquals("logging edit without syncing should do not affect txid",1,editLog.getSyncTxId());
doLogEdit(threadB,editLog,"thread-b 1");
assertEquals("logging edit without syncing should do not affect txid",1,editLog.getSyncTxId());
// Thread B's sync must flush both pending edits in one batch.
doCallLogSync(threadB,editLog);
assertEquals("logSync from second thread should bump txid up to 3",3,editLog.getSyncTxId());
// Thread A's edit was already covered by the batched sync.
doCallLogSync(threadA,editLog);
assertEquals("logSync from first thread shouldn't change txid",3,editLog.getSyncTxId());
assertCounter("TransactionsBatchedInSync",1L,getMetrics("NameNodeActivity"));
}
finally {
threadA.shutdown();
threadB.shutdown();
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test case for loading a very simple edit log from a format prior to the
 * inclusion of edit transaction IDs in the log: the pre-txid Hadoop 0.20
 * sample must load all three edits and reproduce the file metadata.
 */
@Test public void testPreTxidEditLogWithEdits() throws Exception {
final Configuration config=new HdfsConfiguration();
MiniDFSCluster miniCluster=null;
try {
miniCluster=new MiniDFSCluster.Builder(config).numDataNodes(0).build();
miniCluster.waitActive();
final FSNamesystem ns=miniCluster.getNamesystem();
// Replay the canned pre-txid edit log into the live namesystem.
long editCount=testLoad(HADOOP20_SOME_EDITS,ns);
assertEquals(3,editCount);
// The replayed edits must have produced /myfile with its metadata intact.
HdfsFileStatus status=ns.getFileInfo("/myfile",false);
assertEquals("supergroup",status.getGroup());
assertEquals(3,status.getReplication());
}
finally {
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test what happens with the following sequence:
 *   Thread A writes edit
 *   Thread B calls logSyncAll
 *            calls close() on stream
 *   Thread A calls logSync
 * This sequence is legal and can occur if enterSafeMode() is closely
 * followed by saveNamespace.
 */
@Test public void testBatchedSyncWithClosedLogs() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Separate executors give each actor in the scenario its own thread.
ExecutorService threadA=Executors.newSingleThreadExecutor();
ExecutorService threadB=Executors.newSingleThreadExecutor();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
final FSEditLog editLog=fsimage.getEditLog();
// Thread A logs an edit (txid 2) but does not sync it yet.
doLogEdit(threadA,editLog,"thread-a 1");
assertEquals("logging edit without syncing should do not affect txid",1,editLog.getSyncTxId());
// Thread B's logSyncAll must flush A's pending transaction as well.
doCallLogSyncAll(threadB,editLog);
assertEquals("logSyncAll should sync thread A's transaction",2,editLog.getSyncTxId());
editLog.close();
// A's logSync after close must complete harmlessly, not throw.
doCallLogSync(threadA,editLog);
}
finally {
threadA.shutdown();
threadB.shutdown();
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests the getEditLogManifest function using mock storage for a number
 * of different situations: identical journals, disjoint segments,
 * gaps, overlapping/truncated segments, and an in-progress segment
 * shadowed by a finalized one.
 */
@Test public void testEditLogManifestMocks() throws IOException {
// Each row: journal 0 contents, journal 1 contents, expected manifest
// from txid 1, expected manifest from txid 101 (null = not checked).
String[][] cases={
{"[1,100]|[101,200]|[201,]","[1,100]|[101,200]|[201,]","[[1,100], [101,200]]","[[101,200]]"},
{"[1,100]|[101,200]","[1,100]|[201,300]|[301,400]","[[1,100], [101,200], [201,300], [301,400]]",null},
{"[1,100]|[301,400]","[301,400]|[401,500]","[[301,400], [401,500]]",null},
{"[1,100]|[101,150]","[1,50]|[101,200]","[[1,100], [101,200]]","[[101,200]]"},
{"[1,100]|[101,]","[1,100]|[101,200]","[[1,100], [101,200]]","[[101,200]]"}};
for ( String[] tc : cases) {
NNStorage storage=mockStorageWithEdits(tc[0],tc[1]);
FSEditLog log=getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals(tc[2],log.getEditLogManifest(1).toString());
if (tc[3] != null) {
assertEquals(tc[3],log.getEditLogManifest(101).toString());
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that a failure of a required edits directory aborts the NameNode
 * during setReadyToFlush, and that the surviving non-required journal is
 * never asked to flush once the required one has failed.
 */
@Test public void testSingleRequiredFailedEditsDirOnSetReadyToFlush() throws IOException {
// Reuse the default name dirs, marking the first one as required.
String[] editsDirs=cluster.getConfiguration(0).getTrimmedStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
shutDownMiniCluster();
Configuration conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,editsDirs[0]);
// Disable the minimum-journal checks so only the "required" rule applies.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0);
setUpMiniCluster(conf,true);
assertTrue(doAnEdit());
// Fail the required journal (index 0); spy on the healthy journal 1.
invalidateEditsDirAtIndex(0,false,false);
JournalAndStream nonRequiredJas=getJournalAndStream(1);
EditLogFileOutputStream nonRequiredSpy=spyOnStream(nonRequiredJas);
assertTrue(nonRequiredJas.isActive());
try {
doAnEdit();
fail("A single failure of a required journal should have halted the NN");
}
catch ( RemoteException re) {
// The abort reaches the client as an ExitException wrapped in a
// RemoteException.
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains("setReadyToFlush failed for required journal",re);
}
// The abort must happen before the healthy journal is asked to flush.
Mockito.verify(nonRequiredSpy,Mockito.never()).setReadyToFlush();
assertFalse(nonRequiredJas.isActive());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Tests rolling edit logs while transactions are ongoing: worker threads
 * keep generating edits while the log is repeatedly rolled, and each
 * finalized segment is verified to be contiguous and complete.
 */
@Test public void testEditLogRolling() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Typed reference: the raw type would not compile where the Throwable is
// rethrown below (RuntimeException has no Object constructor).
AtomicReference<Throwable> caughtErr=new AtomicReference<Throwable>();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
startTransactionWorkers(namesystem,caughtErr);
long previousLogTxId=1;
for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
// Let the workers generate some edits between rolls.
try {
Thread.sleep(20);
}
catch ( InterruptedException e) {
}
LOG.info("Starting roll " + i + ".");
CheckpointSignature sig=namesystem.rollEditLog();
long nextLog=sig.curSegmentTxId;
String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
// Verify the finalized segment and confirm it accounts for every
// txid up to the start of the new segment.
previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
assertEquals(previousLogTxId,nextLog);
File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
}
}
finally {
stopTransactionWorkers();
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests saving fs image while transactions are ongoing: repeatedly enter
 * safe mode, verify the in-progress edits, save the namespace, and verify
 * the finalized segment matches the new checkpoint txid.
 */
@Test public void testSaveNamespace() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
// Typed reference: the raw type would not compile where the Throwable is
// rethrown below (RuntimeException has no Object constructor).
AtomicReference<Throwable> caughtErr=new AtomicReference<Throwable>();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
startTransactionWorkers(namesystem,caughtErr);
for (int i=0; i < NUM_SAVE_IMAGE && caughtErr.get() == null; i++) {
// Let the workers generate some edits between saves.
try {
Thread.sleep(20);
}
catch ( InterruptedException e) {
}
LOG.info("Save " + i + ": entering safe mode");
namesystem.enterSafeMode(false);
// The in-progress segment starts right after the last checkpoint.
long logStartTxId=fsimage.getStorage().getMostRecentCheckpointTxId() + 1;
verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(logStartTxId),logStartTxId);
LOG.info("Save " + i + ": saving namespace");
namesystem.saveNamespace();
LOG.info("Save " + i + ": leaving safemode");
long savedImageTxId=fsimage.getStorage().getMostRecentCheckpointTxId();
verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(logStartTxId,savedImageTxId),logStartTxId);
// The checkpoint covers everything except the opening txn of the
// freshly started segment.
assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),editLog.getLastWrittenTxId() - 1);
namesystem.leaveSafeMode();
LOG.info("Save " + i + ": complete");
}
}
finally {
stopTransactionWorkers();
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the unsynchronized
 * middle section.
 * This replicates the following manual test proposed by Konstantin:
 *   I start the name-node in debugger.
 *   I do -mkdir and stop the debugger in logSync() just before it does flush.
 *   Then I enter safe mode with another client.
 *   I start saveNamespace and stop the debugger in
 *     FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 *     -> EditLogFileOutputStream.create() ->
 *   after truncating the file but before writing LAYOUT_VERSION into it.
 *   Then I let logSync() run.
 *   Then I terminate the name-node.
 *   After that the name-node won't start, since the edits file is broken.
 */
@Test public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Spy on the first journal's output stream so flush() can be stalled.
JournalAndStream jas=editLog.getJournals().get(0);
EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
// Thread performing the mkdirs whose logSync will be stalled in flush().
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
// Record the failure for the main thread and release the latch so
// the test fails fast instead of hanging on await().
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
waitToEnterFlush.countDown();
}
}
}
;
// Stalls only the edit thread's flush for BLOCK_TIME seconds, signalling
// the main thread once the unsynchronized flush section is reached.
Answer blockingFlush=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
}
;
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
// Entering safe mode forces a sync, so it must wait out the stalled flush.
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
// Saving the namespace here must not corrupt the edit log being synced.
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Expected layout after the save: txids 1-3 finalized, and txid 4 opens
// the new in-progress segment.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Most of the FSNamesystem methods have a synchronized section where they
 * update the name system itself and write to the edit log, and then
 * unsynchronized, they call logSync. This test verifies that, if an
 * operation has written to the edit log but not yet synced it,
 * we wait for that sync before entering safe mode.
 */
@Test public void testSaveRightBeforeSync() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage=namesystem.getFSImage();
// Spy on the edit log so logSync() itself can be intercepted.
FSEditLog editLog=spy(fsimage.getEditLog());
fsimage.editLog=editLog;
// Captures any failure from the background edit thread for re-checking here.
final AtomicReference deferredException=new AtomicReference();
final CountDownLatch waitToEnterSync=new CountDownLatch(1);
// Background thread performing a mkdirs, which ends with logSync().
final Thread doAnEditThread=new Thread(){
@Override public void run(){
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
LOG.info("mkdirs complete");
}
catch ( Throwable ioe) {
LOG.fatal("Got exception",ioe);
deferredException.set(ioe);
// Release the main thread so it observes the failure instead of hanging.
waitToEnterSync.countDown();
}
}
}
;
// Stub logSync(): when invoked from the edit thread, signal the main thread
// and sleep before performing the real sync, leaving edits pending.
Answer blockingSync=new Answer(){
@Override public Void answer( InvocationOnMock invocation) throws Throwable {
LOG.info("logSync called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it just before logSync...");
waitToEnterSync.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to logSync. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("logSync complete");
return null;
}
}
;
doAnswer(blockingSync).when(editLog).logSync();
doAnEditThread.start();
LOG.info("Main thread: waiting to just before logSync...");
waitToEnterSync.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync about to be called.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
long st=Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et=Time.now();
LOG.info("Entered safe mode");
// Entering safe mode must have waited for the pending edits to be synced.
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// Finalized segment should cover txids 1-3; new in-progress starts at 4.
assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
}
finally {
LOG.info("Closing namesystem");
if (namesystem != null) namesystem.close();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises EditsDoubleBuffer through two full write/swap/flush cycles,
 * checking the buffered-byte count and the flushed flag at every step.
 */
@Test public void testDoubleBuffer() throws IOException {
  final EditsDoubleBuffer doubleBuf = new EditsDoubleBuffer(1024);
  assertTrue(doubleBuf.isFlushed());

  final byte[] payload = new byte[100];
  doubleBuf.writeRaw(payload, 0, payload.length);
  assertEquals("Should count new data correctly", payload.length, doubleBuf.countBufferedBytes());
  assertTrue("Writing to current buffer should not affect flush state", doubleBuf.isFlushed());

  // Swapping buffers marks the data ready but not yet flushed.
  doubleBuf.setReadyToFlush();
  assertEquals("Swapping buffers should still count buffered bytes", payload.length, doubleBuf.countBufferedBytes());
  assertFalse(doubleBuf.isFlushed());

  final DataOutputBuffer sink = new DataOutputBuffer();
  doubleBuf.flushTo(sink);
  assertEquals(payload.length, sink.getLength());
  assertTrue(doubleBuf.isFlushed());
  assertEquals(0, doubleBuf.countBufferedBytes());

  // Second round: write, swap, and flush again; output accumulates in sink.
  doubleBuf.writeRaw(payload, 0, payload.length);
  assertEquals("Should count new data correctly", payload.length, doubleBuf.countBufferedBytes());
  doubleBuf.setReadyToFlush();
  doubleBuf.flushTo(sink);
  assertEquals(payload.length * 2, sink.getLength());
  assertEquals(0, doubleBuf.countBufferedBytes());
  sink.close();
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Error handling in FSDirectory#setINodeXAttrs: duplicate XAttrs in one
 * request, CREATE on an existing XAttr, REPLACE on a missing XAttr, and a
 * successful combined CREATE|REPLACE update at the end.
 */
@Test(timeout=300000) public void testXAttrMultiAddRemoveErrors() throws Exception {
List existingXAttrs=Lists.newArrayList();
List toAdd=Lists.newArrayList();
// Listing the same XAttr twice in a single request must be rejected.
toAdd.add(generatedXAttrs.get(0));
toAdd.add(generatedXAttrs.get(1));
toAdd.add(generatedXAttrs.get(2));
toAdd.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Specified the same xattr to be set twice");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Cannot specify the same " + "XAttr to be set",e);
}
// CREATE of an XAttr that already exists (no REPLACE flag) must fail.
toAdd.remove(generatedXAttrs.get(0));
existingXAttrs.add(generatedXAttrs.get(0));
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
fail("Set XAttr that is already set without REPLACE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("already exists",e);
}
// REPLACE of XAttrs that don't exist yet (no CREATE flag) must fail.
try {
fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
fail("Set XAttr that does not exist without the CREATE flag");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("does not exist",e);
}
// Sanity: a plain CREATE of the two remaining new XAttrs succeeds.
toAdd.remove(generatedXAttrs.get(0));
List newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
assertEquals("Unexpected toAdd size",2,toAdd.size());
for ( XAttr x : toAdd) {
assertTrue("Did not find added XAttr " + x,newXAttrs.contains(x));
}
// REPLACE existing XAttrs with new values and verify the values stuck.
existingXAttrs=newXAttrs;
toAdd=Lists.newArrayList();
for (int i=0; i < 3; i++) {
XAttr xAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a" + i).setValue(new byte[]{(byte)(i * 2)}).build();
toAdd.add(xAttr);
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
assertEquals("Unexpected number of new XAttrs",3,newXAttrs.size());
for (int i=0; i < 3; i++) {
assertArrayEquals("Unexpected XAttr value",new byte[]{(byte)(i * 2)},newXAttrs.get(i).getValue());
}
// CREATE|REPLACE together handles a mix of new and existing XAttrs.
existingXAttrs=newXAttrs;
toAdd=Lists.newArrayList();
for (int i=0; i < 4; i++) {
toAdd.add(generatedXAttrs.get(i));
}
newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
verifyXAttrsPresent(newXAttrs,4);
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Dump the namespace tree and verify every non-empty, non-snapshot line is a
 * well-formed tree item carrying a recognizable inode class name.
 */
@Test public void testDumpTree() throws Exception {
  final INode root = fsdir.getINode("/");
  LOG.info("Original tree");
  final StringBuffer b1 = root.dumpTreeRecursively();
  System.out.println("b1=" + b1);
  final BufferedReader in = new BufferedReader(new StringReader(b1.toString()));
  // The first line names the root inode's class.
  String line = in.readLine();
  checkClassName(line);
  // Each remaining item line must start with one of the two tree prefixes.
  while ((line = in.readLine()) != null) {
    line = line.trim();
    if (!line.isEmpty() && !line.contains("snapshot")) {
      assertTrue("line=" + line, line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM) || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
      checkClassName(line);
    }
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the per-inode XAttr limit in FSDirectory#setINodeXAttrs:
 * SYSTEM- and RAW-namespace XAttrs are accepted here even with two USER
 * XAttrs present, but a user-visible (TRUSTED) XAttr that would exceed the
 * limit must be rejected with "would exceed limit".
 */
@Test public void testINodeXAttrsLimit() throws Exception {
  List existingXAttrs = Lists.newArrayListWithCapacity(2);
  XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
  XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
  existingXAttrs.add(xAttr1);
  existingXAttrs.add(xAttr2);
  // Adding SYSTEM- and RAW-namespace XAttrs on top of the two USER ones
  // is expected to succeed, yielding 4 XAttrs in total.
  XAttr newSystemXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
  XAttr newRawXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
  List newXAttrs = Lists.newArrayListWithCapacity(2);
  newXAttrs.add(newSystemXAttr);
  newXAttrs.add(newRawXAttr);
  List xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
  // Fixed: JUnit takes the expected value first; was assertEquals(xAttrs.size(), 4).
  assertEquals(4, xAttrs.size());
  // A user-visible (TRUSTED) XAttr pushing past the limit must be refused.
  XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.TRUSTED).setName("a4").setValue(new byte[]{0x34, 0x34, 0x34}).build();
  newXAttrs.set(0, newXAttr1);
  try {
    fsdir.setINodeXAttrs(existingXAttrs, newXAttrs, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    fail("Setting user visible xattr on inode should fail if " + "reaching limit.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " + "to inode, would exceed limit", e);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Corrupts the tail of the edit log with bogus OP_DELETE opcodes and checks
 * that namenode startup fails with an error message listing the recent
 * opcode offsets.
 *
 * Fixed: the original leaked the MiniDFSCluster and the RandomAccessFile if
 * any intermediate step threw; both are now released in finally blocks.
 */
@Test public void testDisplayRecentEditLogOpCodes() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    // Generate 20 mkdir transactions so the log has a known tail.
    for (int i = 0; i < 20; i++) {
      fileSys.mkdirs(new Path("/tmp/tmp" + i));
    }
    StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
    cluster.shutdown();
    cluster = null; // cleanly shut down; prevent a second shutdown in finally

    File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
    assertTrue("Should exist: " + editFile, editFile.exists());
    // Overwrite the last 40 bytes with 20 OP_DELETE opcodes to corrupt the log.
    long fileLen = editFile.length();
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    try {
      rwf.seek(fileLen - 40);
      for (int i = 0; i < 20; i++) {
        rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
      }
    } finally {
      rwf.close();
    }
    // Expected startup-failure message, including the recent opcode offsets.
    StringBuilder bld = new StringBuilder();
    bld.append("^Error replaying edit log at offset \\d+. ");
    bld.append("Expected transaction ID was \\d+\n");
    bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).format(false).build();
      fail("should not be able to start");
    } catch (IOException e) {
      assertTrue("error message contains opcodes message", e.getMessage().matches(bld.toString()));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips every opcode through FSEditLogOpCodes#fromByte and checks all
 * 256 byte values decode consistently with the local fromByte helper.
 */
@Test public void testFSEditLogOpCodes() throws IOException {
  // Every declared opcode must decode back to itself.
  for (FSEditLogOpCodes op : FSEditLogOpCodes.values()) {
    final byte encoded = op.getOpCode();
    assertEquals("c=" + op + ", code=" + encoded, op, FSEditLogOpCodes.fromByte(encoded));
  }
  // Every possible byte value must agree with the reference helper.
  for (int value = 0; value < (1 << Byte.SIZE); value++) {
    final byte encoded = (byte) value;
    assertEquals("b=" + value + ", code=" + encoded, fromByte(encoded), FSEditLogOpCodes.fromByte(encoded));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An unfinalized edit log truncated down to its first 8 bytes (no complete
 * transactions) must validate without a corrupt header and report
 * INVALID_TXID as the end transaction id.
 */
@Test public void testValidateEmptyEditLog() throws IOException {
File testDir=new File(TEST_DIR,"testValidateEmptyEditLog");
SortedMap offsetToTxId=Maps.newTreeMap();
// Create a log with zero transactions, then truncate it to 8 bytes.
File logFile=prepareUnfinalizedTestEditLog(testDir,0,offsetToTxId);
truncateFile(logFile,8);
EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
assertTrue(!validation.hasCorruptHeader());
// With no complete transactions, there is no usable end txid.
assertEquals(HdfsConstants.INVALID_TXID,validation.getEndTxId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validation of an edit log whose body is corrupted or truncated at each
 * transaction boundary: the reported end txid shrinks accordingly, and the
 * header is never reported as corrupt.
 */
@Test public void testValidateEditLogWithCorruptBody() throws IOException {
File testDir=new File(TEST_DIR,"testValidateEditLogWithCorruptBody");
SortedMap offsetToTxId=Maps.newTreeMap();
final int NUM_TXNS=20;
// prepareUnfinalizedTestEditLog records each txn's file offset in offsetToTxId.
File logFile=prepareUnfinalizedTestEditLog(testDir,NUM_TXNS,offsetToTxId);
// Keep a pristine backup so each corruption round starts from a clean copy.
File logFileBak=new File(testDir,logFile.getName() + ".bak");
Files.copy(logFile,logFileBak);
// Sanity: the intact log validates out to NUM_TXNS + 1.
EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
assertTrue(!validation.hasCorruptHeader());
assertEquals(NUM_TXNS + 1,validation.getEndTxId());
// Corrupt one opcode byte per transaction: only corrupting the last
// transaction lowers the end txid; earlier corruption is skipped over.
for ( Map.Entry entry : offsetToTxId.entrySet()) {
long txOffset=entry.getKey();
long txId=entry.getValue();
Files.copy(logFileBak,logFile);
corruptByteInFile(logFile,txOffset);
validation=EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId=(txId == (NUM_TXNS + 1)) ? NUM_TXNS : (NUM_TXNS + 1);
assertEquals("Failed when corrupting txn opcode at " + txOffset,expectedEndTxId,validation.getEndTxId());
assertTrue(!validation.hasCorruptHeader());
}
// Truncate at each transaction's offset: validation must report the
// previous txid as the end (or INVALID_TXID when nothing remains).
for ( Map.Entry entry : offsetToTxId.entrySet()) {
long txOffset=entry.getKey();
long txId=entry.getValue();
Files.copy(logFileBak,logFile);
truncateFile(logFile,txOffset);
validation=EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId=(txId == 0) ? HdfsConstants.INVALID_TXID : (txId - 1);
assertEquals("Failed when corrupting txid " + txId + " txn opcode "+ "at "+ txOffset,expectedEndTxId,validation.getEndTxId());
assertTrue(!validation.hasCorruptHeader());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Ensure that the digest written by the saver equals to the digest of the
 * saved fsimage file.
 */
@Test public void testDigest() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    // Force a checkpoint so a fresh fsimage (and its stored MD5) is written.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    final File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0);
    final File fsimage = FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
    // The MD5 recorded alongside the image must match its actual digest.
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage), MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Ensure mtime and atime can be loaded from fsimage.
 */
@Test(timeout=60000) public void testLoadMtimeAtime() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem hdfs=cluster.getFileSystem();
String userDir=hdfs.getHomeDirectory().toUri().getPath().toString();
// Create one file, one directory, and one symlink, then record their times.
Path file=new Path(userDir,"file");
Path dir=new Path(userDir,"/dir");
Path link=new Path(userDir,"/link");
hdfs.createNewFile(file);
hdfs.mkdirs(dir);
hdfs.createSymlink(file,link,false);
long mtimeFile=hdfs.getFileStatus(file).getModificationTime();
long atimeFile=hdfs.getFileStatus(file).getAccessTime();
long mtimeDir=hdfs.getFileStatus(dir).getModificationTime();
long mtimeLink=hdfs.getFileLinkStatus(link).getModificationTime();
long atimeLink=hdfs.getFileLinkStatus(link).getAccessTime();
// Checkpoint so the inodes are persisted into the fsimage.
hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
// Restart without reformatting so the namespace is read back from the image.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(1).build();
cluster.waitActive();
hdfs=cluster.getFileSystem();
// All recorded times must survive the save/load round trip.
assertEquals(mtimeFile,hdfs.getFileStatus(file).getModificationTime());
assertEquals(atimeFile,hdfs.getFileStatus(file).getAccessTime());
assertEquals(mtimeDir,hdfs.getFileStatus(dir).getModificationTime());
assertEquals(mtimeLink,hdfs.getFileLinkStatus(link).getModificationTime());
assertEquals(atimeLink,hdfs.getFileLinkStatus(link).getAccessTime());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Simple inspection of a storage directory containing two images, one
 * finalized edits segment, and one in-progress segment; the inspector must
 * pick the newer image (txid 456) as the latest.
 */
@Test public void testCurrentStorageInspector() throws IOException {
  final FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector();
  // Mock layout: images at txids 123 and 456, finalized edits 123-456,
  // in-progress edits starting at 457.
  final String olderImage = "/foo/current/" + getImageFileName(123);
  final String finalizedEdits = "/foo/current/" + getFinalizedEditsFileName(123, 456);
  final String newerImage = "/foo/current/" + getImageFileName(456);
  final String inProgressEdits = "/foo/current/" + getInProgressEditsFileName(457);
  final StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(NameNodeDirType.IMAGE_AND_EDITS, false, olderImage, finalizedEdits, newerImage, inProgressEdits);
  inspector.inspectDirectory(mockDir);
  assertEquals(2, inspector.foundImages.size());
  final FSImageFile latestImage = inspector.getLatestImages().get(0);
  assertEquals(456, latestImage.txId);
  assertSame(mockDir, latestImage.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File("/foo/current/" + getImageFileName(456)), latestImage.getFile());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test when there is snapshot taken on root
 */
@Test public void testSnapshotOnRoot() throws Exception {
final Path root=new Path("/");
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root,"s1");
// Restart (no reformat): snapshot state is reloaded from persisted state.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
// Save a new fsimage containing the root snapshot, then restart again so
// the snapshot is loaded from the image rather than replayed from edits.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
// After reload, root must still be snapshottable with exactly one diff
// corresponding to snapshot "s1".
INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory();
assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
List diffList=rootNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs());
SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null);
assertEquals(root,sdirs[0].getFullPath());
// One more save/restart cycle to confirm the reloaded state is re-savable.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn=cluster.getNamesystem();
hdfs=cluster.getFileSystem();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test fsimage loading when 1) there is an empty file loaded from fsimage,
 * and 2) there is later an append operation to be applied from edit log.
 */
@Test(timeout=60000) public void testLoadImageWithEmptyFile() throws Exception {
// Create a zero-length file and persist it into the fsimage.
Path file=new Path(dir,"file");
FSDataOutputStream out=hdfs.create(file);
out.close();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// Append one byte; this is recorded only in the edit log, not the image.
out=hdfs.append(file);
out.write(1);
out.close();
// Restart: the image's empty file plus the replayed append must yield len 1.
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
cluster.waitActive();
hdfs=cluster.getFileSystem();
FileStatus status=hdfs.getFileStatus(file);
assertEquals(1,status.getLen());
}
InternalCallVerifier BooleanVerifier
/**
 * FSNamesystem#clear must reset the image-loaded flag and leave the root
 * directory empty; a subsequent imageLoadComplete() sets the flag again.
 */
@Test public void testReset() throws Exception {
  final Configuration conf = new Configuration();
  final FSEditLog mockEditLog = Mockito.mock(FSEditLog.class);
  final FSImage mockImage = Mockito.mock(FSImage.class);
  Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);
  final FSNamesystem fsn = new FSNamesystem(conf, mockImage);

  fsn.imageLoadComplete();
  assertTrue(fsn.isImageLoaded());
  fsn.clear();
  assertFalse(fsn.isImageLoaded());
  // After clear(), the root directory must have no children.
  final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory().getINode("/");
  assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  fsn.imageLoadComplete();
  assertTrue(fsn.isImageLoaded());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Replication queues must not be populated during the very first (startup)
 * safe mode, but must stay populated when safe mode is re-entered after the
 * namesystem has left safe mode once.
 */
@Test public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
Configuration conf=new Configuration();
FSEditLog fsEditLog=Mockito.mock(FSEditLog.class);
FSImage fsImage=Mockito.mock(FSImage.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
FSNamesystem fsNamesystem=new FSNamesystem(conf,fsImage);
FSNamesystem fsn=Mockito.spy(fsNamesystem);
// Mock an HA context whose state always allows populating repl queues,
// injected via Whitebox since there is no public setter.
HAContext haContext=Mockito.mock(HAContext.class);
HAState haState=Mockito.mock(HAState.class);
Mockito.when(haContext.getState()).thenReturn(haState);
Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
Whitebox.setInternalState(fsn,"haContext",haContext);
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
// First safe mode: queues must NOT be populated yet.
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode",fsn.isInSafeMode());
assertTrue("Replication queues were being populated during very first " + "safemode",!fsn.isPopulatingReplQueues());
fsn.leaveSafeMode();
assertTrue("FSNamesystem didn't leave safemode",!fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated even after leaving " + "safemode",fsn.isPopulatingReplQueues());
// Second safe mode: queues stay populated now that startup is over.
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode",fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated after entering " + "safemode 2nd time",fsn.isPopulatingReplQueues());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test public void testFSNamespaceClearLeases() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  final FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  // Register a single lease, then verify clear() removes it.
  fsn.getLeaseManager().addLease("client1", "importantFile");
  assertEquals(1, fsn.getLeaseManager().countLease());
  fsn.clear();
  assertEquals(0, fsn.getLeaseManager().countLease());
}
InternalCallVerifier BooleanVerifier
/**
 * The "dfs.namenode.fslock.fair" setting must control whether the
 * namesystem lock is created in fair mode.
 */
@Test public void testFsLockFairness() throws IOException, InterruptedException {
  final Configuration conf = new Configuration();
  final FSEditLog mockEditLog = Mockito.mock(FSEditLog.class);
  final FSImage mockImage = Mockito.mock(FSImage.class);
  Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);

  conf.setBoolean("dfs.namenode.fslock.fair", true);
  assertTrue(new FSNamesystem(conf, mockImage).getFsLockForTests().isFair());

  conf.setBoolean("dfs.namenode.fslock.fair", false);
  assertFalse(new FSNamesystem(conf, mockImage).getFsLockForTests().isFair());
}
InternalCallVerifier BooleanVerifier
/**
 * Leaving safe mode must clear both the safe-mode and startup-safe-mode
 * flags; entering safe mode for low resources counts as safe mode but not
 * as startup safe mode.
 */
@Test public void testStartupSafemode() throws IOException {
  final Configuration conf = new Configuration();
  final FSImage mockImage = Mockito.mock(FSImage.class);
  final FSEditLog mockEditLog = Mockito.mock(FSEditLog.class);
  Mockito.when(mockImage.getEditLog()).thenReturn(mockEditLog);
  final FSNamesystem fsn = new FSNamesystem(conf, mockImage);

  fsn.leaveSafeMode();
  assertFalse("After leaving safemode FSNamesystem.isInStartupSafeMode still " + "returned true", fsn.isInStartupSafeMode());
  assertFalse("After leaving safemode FSNamesystem.isInSafeMode still returned" + " true", fsn.isInSafeMode());

  // enterSafeMode(true) == safe mode due to low resources.
  fsn.enterSafeMode(true);
  assertFalse("After entering safemode due to low resources FSNamesystem." + "isInStartupSafeMode still returned true", fsn.isInStartupSafeMode());
  assertTrue("After entering safemode due to low resources FSNamesystem." + "isInSafeMode still returned false", fsn.isInSafeMode());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests that the namenode edits dirs are gotten with duplicates removed
 */
@Test public void testUniqueEditDirs() throws IOException {
  final Configuration config = new Configuration();
  // Three configured entries, two of which are the same URI.
  config.set(DFS_NAMENODE_EDITS_DIR_KEY, "file://edits/dir, " + "file://edits/dir1,file://edits/dir1");
  final Collection uniqueDirs = FSNamesystem.getNamespaceEditsDirs(config);
  // Only the two distinct URIs should remain.
  assertEquals(2, uniqueDirs.size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FSNamesystemLock should behave like a reentrant read-write lock: nested
 * acquisitions increase the hold counts, releases decrease them, and
 * isWriteLockedByCurrentThread tracks write ownership.
 */
@Test public void testFSNamesystemLockCompatibility(){
FSNamesystemLock rwLock=new FSNamesystemLock(true);
// Read lock: hold count follows nested lock/unlock pairs.
assertEquals(0,rwLock.getReadHoldCount());
rwLock.readLock().lock();
assertEquals(1,rwLock.getReadHoldCount());
rwLock.readLock().lock();
assertEquals(2,rwLock.getReadHoldCount());
rwLock.readLock().unlock();
assertEquals(1,rwLock.getReadHoldCount());
rwLock.readLock().unlock();
assertEquals(0,rwLock.getReadHoldCount());
// Write lock: same nesting behavior, plus the ownership flag.
assertFalse(rwLock.isWriteLockedByCurrentThread());
assertEquals(0,rwLock.getWriteHoldCount());
rwLock.writeLock().lock();
assertTrue(rwLock.isWriteLockedByCurrentThread());
assertEquals(1,rwLock.getWriteHoldCount());
rwLock.writeLock().lock();
assertTrue(rwLock.isWriteLockedByCurrentThread());
assertEquals(2,rwLock.getWriteHoldCount());
rwLock.writeLock().unlock();
assertTrue(rwLock.isWriteLockedByCurrentThread());
assertEquals(1,rwLock.getWriteHoldCount());
rwLock.writeLock().unlock();
assertFalse(rwLock.isWriteLockedByCurrentThread());
assertEquals(0,rwLock.getWriteHoldCount());
}
InternalCallVerifier BooleanVerifier
/**
 * MBean attribute reads must complete even while this thread holds the
 * FSNamesystem write lock; the background MBeanClient has 20 seconds
 * (the join timeout) to set its success flag.
 */
@Test public void testWithFSNamesystemWriteLock() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
FSNamesystem fsn=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fsn=cluster.getNameNode().namesystem;
// Hold the write lock on this thread while the client queries JMX.
fsn.writeLock();
MBeanClient client=new MBeanClient();
client.start();
client.join(20000);
assertTrue("JMX calls are blocked when FSNamesystem's writerlock" + "is owned by another thread",client.succeeded);
client.interrupt();
}
finally {
// Release the lock (if still held) before shutting the cluster down.
if (fsn != null && fsn.hasWriteLock()) {
fsn.writeUnlock();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies the FSNamesystemState MXBean: the SnapshotStats JSON attribute
 * must agree with the live FSNamesystem counters, and PendingDeletionBlocks
 * must be exposed as a non-null Long.
 */
@Test public void test() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNameNode().namesystem;
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
// SnapshotStats is published as a JSON string; parse and cross-check it
// against the namesystem's own counters.
String snapshotStats=(String)(mbs.getAttribute(mxbeanName,"SnapshotStats"));
@SuppressWarnings("unchecked") Map stat=(Map)JSON.parse(snapshotStats);
assertTrue(stat.containsKey("SnapshottableDirectories") && (Long)stat.get("SnapshottableDirectories") == fsn.getNumSnapshottableDirs());
assertTrue(stat.containsKey("Snapshots") && (Long)stat.get("Snapshots") == fsn.getNumSnapshots());
Object pendingDeletionBlocks=mbs.getAttribute(mxbeanName,"PendingDeletionBlocks");
assertNotNull(pendingDeletionBlocks);
assertTrue(pendingDeletionBlocks instanceof Long);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * End-to-end favored-nodes check: for each created file, the hosts of every
 * block location must match (via compareNodes) the favored datanodes that
 * were passed to create().
 */
@Test(timeout=180000) public void testFavoredNodesEndToEnd() throws Exception {
for (int i=0; i < NUM_FILES; i++) {
// Seed varies per file so a different favored-node set is picked each round.
Random rand=new Random(System.currentTimeMillis() + i);
InetSocketAddress datanode[]=getDatanodes(rand);
Path p=new Path("/filename" + i);
FSDataOutputStream out=dfs.create(p,FsPermission.getDefault(),true,4096,(short)3,4096L,null,datanode);
out.write(SOME_BYTES);
out.close();
BlockLocation[] locations=getBlockLocations(p);
for ( BlockLocation loc : locations) {
String[] hosts=loc.getNames();
String[] hosts1=getStringForInetSocketAddrs(datanode);
assertTrue(compareNodes(hosts,hosts1));
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that inprogress files are handled correct. Set up a single
 * edits directory. Fail on after the last roll. Then verify that the
 * logs have the expected number of transactions.
 */
@Test public void testInprogressRecovery() throws IOException {
  final File editsDir = new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
  // Five rolls with an abort injected via AbortSpec(5, 0).
  final NNStorage storage = setupEdits(Collections.singletonList(editsDir.toURI()), 5, new AbortSpec(5, 0));
  final StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  final FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  // All finalized transactions plus those written before the failure.
  assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1, true, false));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test public void testExcludeInProgressStreams() throws CorruptionException, IOException {
File f=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
// 10 rolls; the final segment is left in progress.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,false);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Counting without in-progress segments stops at the finalized ones (100).
assertEquals(100,getNumberOfTransactions(jm,1,false,false));
// Reading from txid 90 (mid-segment) must likewise never surpass txid 100.
EditLogInputStream elis=getJournalInputStream(jm,90,false);
try {
FSEditLogOp lastReadOp=null;
while ((lastReadOp=elis.readOp()) != null) {
assertTrue(lastReadOp.getTransactionId() <= 100);
}
}
finally {
IOUtils.cleanup(LOG,elis);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that we receive the correct number of transactions when we count
 * the number of transactions around gaps.
 * Set up a single edits directory, with no failures. Delete the 4th logfile.
 * Test that getNumberOfTransactions returns the correct number of
 * transactions before this gap and after this gap. Also verify that if you
 * try to count on the gap that an exception is thrown.
 */
@Test public void testManyLogsWithGaps() throws IOException {
  final File editsDir = new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
  final NNStorage storage = setupEdits(Collections.singletonList(editsDir.toURI()), 10);
  final StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  final long startGapTxId = 3 * TXNS_PER_ROLL + 1;
  final long endGapTxId = 4 * TXNS_PER_ROLL;
  // Locate the segment covering [startGapTxId, endGapTxId] and delete it.
  final File[] matches = new File(editsDir, "current").listFiles(new FilenameFilter(){
    @Override public boolean accept(File dir, String name) {
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId));
    }
  });
  assertEquals(1, matches.length);
  assertTrue(matches[0].delete());
  final FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  // Count before the gap, starting on the gap, and after the gap.
  assertEquals(startGapTxId - 1, getNumberOfTransactions(jm, 1, true, true));
  assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
  assertEquals(11 * TXNS_PER_ROLL - endGapTxId, getNumberOfTransactions(jm, endGapTxId + 1, true, true));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the normal operation of loading transactions from
 * file journal manager. 3 edits directories are setup without any
 * failures. Test that we read in the expected number of transactions.
 */
@Test public void testNormalOperation() throws IOException {
  final File dir0 = new File(TestEditLog.TEST_DIR + "/normtest0");
  final File dir1 = new File(TestEditLog.TEST_DIR + "/normtest1");
  final File dir2 = new File(TestEditLog.TEST_DIR + "/normtest2");
  final List editUris = ImmutableList.of(dir0.toURI(), dir1.toURI(), dir2.toURI());
  final NNStorage storage = setupEdits(editUris, 5);
  // Every edits directory must yield the full transaction count.
  int journalCount = 0;
  for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
    final FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    assertEquals(6 * TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
    journalCount++;
  }
  assertEquals(3, journalCount);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier
// Verifies that a failure to finalize a log segment (forced by making the
// storage directory read-only) is reported to NNStorage as a removed
// directory, and that the failure surfaces as an IllegalStateException.
@Test(expected=IllegalStateException.class) public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
File f=new File(TestEditLog.TEST_DIR + "/filejournaltestError");
// 10 rolls, aborting on the 10th roll of directory 0 so an inprogress segment exists.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
String sdRootPath=sd.getRoot().getAbsolutePath();
// Remove write permission recursively so finalization must fail.
FileUtil.chmod(sdRootPath,"-w",true);
try {
jm.finalizeLogSegment(0,1);
}
finally {
// Restore permissions so later cleanup can succeed, then confirm the
// failed directory was recorded as removed before the exception propagates.
FileUtil.chmod(sdRootPath,"+w",true);
assertTrue(storage.getRemovedStorageDirs().contains(sd));
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Make requests with starting transaction ids which don't match the beginning
* txid of some log segments.
* This should succeed.
*/
// Request transaction counts starting from every possible txid, including
// ids that fall in the middle of a log segment; each request must succeed
// and report the number of transactions from that point onward.
@Test public void testAskForTransactionsMidfile() throws IOException {
File editsRoot=new File(TestEditLog.TEST_DIR + "/askfortransactionsmidfile");
NNStorage nnStorage=setupEdits(Collections.singletonList(editsRoot.toURI()),10);
StorageDirectory editsDir=nnStorage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager journal=new FileJournalManager(conf,editsDir,nnStorage);
// 10 rolls produce 11 segments' worth of transactions in total.
final int totalTxIds=10 * 11;
for (int startTxId=1; startTxId <= totalTxIds; startTxId++) {
long expectedRemaining=totalTxIds - startTxId + 1;
assertEquals(expectedRemaining,getNumberOfTransactions(journal,startTxId,true,false));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test that we can load an edits directory with a corrupt inprogress file.
* The corrupt inprogress file should be moved to the side.
*/
@Test public void testManyLogsWithCorruptInprogress() throws IOException {
File f=new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
// Abort on the 10th roll so an inprogress segment is left behind.
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0));
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
File[] files=new File(f,"current").listFiles(new FilenameFilter(){
@Override public boolean accept( File dir, String name){
// Match only the single inprogress segment file.
return name.startsWith("edits_inprogress");
}
}
);
// listFiles() returns null on I/O error; fail clearly rather than NPE.
assertNotNull("Could not list files in " + new File(f,"current"),files);
// JUnit's assertEquals takes (expected, actual); the original had them swapped.
assertEquals(1,files.length);
corruptAfterStartSegment(files[0]);
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// The corrupt inprogress file is moved aside; only the START_LOG_SEGMENT op
// of the final segment is readable beyond the 10 finalized rolls.
assertEquals(10 * TXNS_PER_ROLL + 1,getNumberOfTransactions(jm,1,true,false));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test that FileJournalManager behaves correctly despite inprogress
* files in all its edit log directories. Set up 3 directories and fail
* all on the last roll. Verify that the correct number of transaction
* are then loaded.
*/
@Test public void testInprogressRecoveryAll() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/failalltest0");
File f2=new File(TestEditLog.TEST_DIR + "/failalltest1");
File f3=new File(TestEditLog.TEST_DIR + "/failalltest2");
// Parameterized types instead of raw List/Iterator.
List<URI> editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
// Fail every directory on the last (5th) roll.
NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,0),new AbortSpec(5,1),new AbortSpec(5,2));
Iterator<StorageDirectory> dirs=storage.dirIterator(NameNodeDirType.EDITS);
int dirCount=0;
// Each directory should recover the same count: 5 finalized rolls plus
// the transactions written before the aborted roll.
while (dirs.hasNext()) {
FileJournalManager jm=new FileJournalManager(conf,dirs.next(),storage);
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
dirCount++;
}
// All three directories must have been checked.
assertEquals(3,dirCount);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test that we can read from a stream created by FileJournalManager.
* Create a single edits directory, failing it on the final roll.
* Then try loading from the point of the 3rd roll. Verify that we read
* the correct number of transactions from this point.
*/
// Read transactions through a FileJournalManager stream from a directory
// that failed on its final roll, first from txid 1 and then from the point
// of the 3rd roll, checking both counts.
@Test public void testReadFromStream() throws IOException {
File editsRoot=new File(TestEditLog.TEST_DIR + "/readfromstream");
NNStorage nnStorage=setupEdits(Collections.singletonList(editsRoot.toURI()),10,new AbortSpec(10,0));
StorageDirectory editsDir=nnStorage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager journal=new FileJournalManager(conf,editsDir,nnStorage);
// Total: 10 finalized rolls plus the partial transactions of the aborted roll.
long totalTxns=TXNS_PER_ROLL * 10 + TXNS_PER_FAIL;
assertEquals(totalTxns,getNumberOfTransactions(journal,1,true,false));
// Skip the first three rolls and count from the next txid onward.
long skipped=3 * TXNS_PER_ROLL;
long loadable=getNumberOfTransactions(journal,skipped + 1,true,false);
assertEquals(totalTxns - skipped,loadable);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Make sure that we starting reading the correct op when we request a stream
* with a txid in the middle of an edit log file.
*/
@Test public void testReadFromMiddleOfEditLog() throws CorruptionException, IOException {
File f=new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
// Open a stream positioned at txid 5, which is inside the first segment.
EditLogInputStream elis=getJournalInputStream(jm,5,true);
try {
FSEditLogOp op=elis.readOp();
// assertEquals takes (message, expected, actual); the original passed
// them as (message, actual, expected).
assertEquals("read unexpected op",5,op.getTransactionId());
}
finally {
// Always release the stream, even if the assertion fails.
IOUtils.cleanup(LOG,elis);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test a mixture of inprogress files and finalised. Set up 3 edits
* directories and fail the second on the last roll. Verify that reading
* the transactions, reads from the finalised directories.
*/
@Test public void testInprogressRecoveryMixed() throws IOException {
File f1=new File(TestEditLog.TEST_DIR + "/mixtest0");
File f2=new File(TestEditLog.TEST_DIR + "/mixtest1");
File f3=new File(TestEditLog.TEST_DIR + "/mixtest2");
// Parameterized types instead of raw List/Iterator.
List<URI> editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI());
// Only the second directory (index 1) fails on the last roll.
NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,1));
Iterator<StorageDirectory> dirs=storage.dirIterator(NameNodeDirType.EDITS);
// First directory finalized all rolls: 6 full segments.
StorageDirectory sd=dirs.next();
FileJournalManager jm=new FileJournalManager(conf,sd,storage);
assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false));
// Second directory aborted mid-roll: 5 full segments plus the partial one.
sd=dirs.next();
jm=new FileJournalManager(conf,sd,storage);
assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
// Third directory also finalized all rolls.
sd=dirs.next();
jm=new FileJournalManager(conf,sd,storage);
assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false));
}
InternalCallVerifier BooleanVerifier
/**
* Test that file data becomes available before file is closed.
*/
// Exercises the namenode's max-objects limit: creates files up to the limit,
// verifies that exceeding it fails, then checks that deleting objects frees
// capacity for new files and directories. 'currentNodes' mirrors the expected
// namespace object count (each file = inode + block = 2 objects).
@Test public void testFileLimit() throws IOException {
Configuration conf=new HdfsConfiguration();
int maxObjects=5;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY,maxObjects);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
int currentNodes=0;
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
FSNamesystem namesys=cluster.getNamesystem();
try {
Path path=new Path("/");
assertTrue("/ should be a directory",fs.getFileStatus(path).isDirectory());
// The root directory counts as one object.
currentNodes=1;
// Fill the namespace close to the limit; each file adds inode + block.
for (int i=0; i < maxObjects / 2; i++) {
Path file=new Path("/filestatus" + i);
DFSTestUtil.createFile(fs,file,1024,1024,blockSize,(short)1,seed);
System.out.println("Created file " + file);
currentNodes+=2;
}
// One more file would exceed maxObjects and must be rejected.
boolean hitException=false;
try {
Path file=new Path("/filestatus");
DFSTestUtil.createFile(fs,file,1024,1024,blockSize,(short)1,seed);
System.out.println("Created file " + file);
}
catch ( IOException e) {
hitException=true;
}
assertTrue("Was able to exceed file limit",hitException);
// Deleting a file frees its two objects; wait for the namenode's count
// to reflect that before creating again.
Path file0=new Path("/filestatus0");
fs.delete(file0,true);
System.out.println("Deleted file " + file0);
currentNodes-=2;
waitForLimit(namesys,currentNodes);
DFSTestUtil.createFile(fs,file0,1024,1024,blockSize,(short)1,seed);
System.out.println("Created file " + file0 + " again.");
currentNodes+=2;
// Delete again to make room for the directory creation below.
file0=new Path("/filestatus0");
fs.delete(file0,true);
System.out.println("Deleted file " + file0 + " again.");
currentNodes-=2;
waitForLimit(namesys,currentNodes);
// mkdirs of /dir0/dir1 adds two directory inodes.
Path dir=new Path("/dir0/dir1");
fs.mkdirs(dir);
System.out.println("Created directories " + dir);
currentNodes+=2;
waitForLimit(namesys,currentNodes);
// The namespace is at the limit again, so another mkdir must fail.
hitException=false;
try {
fs.mkdirs(new Path("dir.fail"));
System.out.println("Created directory should not have succeeded.");
}
catch ( IOException e) {
hitException=true;
}
assertTrue("Was able to exceed dir limit",hitException);
}
finally {
fs.close();
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test if fsck can return -1 in case of failure
* @throws Exception
*/
@Test public void testFsckError() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
cluster=new MiniDFSCluster.Builder(conf).build();
String fileName="/test.txt";
Path filePath=new Path(fileName);
FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,(short)1,1L);
DFSTestUtil.waitReplication(fs,filePath,(short)1);
INodeFile node=(INodeFile)cluster.getNamesystem().dir.getNode(fileName,true);
final BlockInfo[] blocks=node.getBlocks();
// assertEquals takes (expected, actual); the original had them swapped.
assertEquals(1,blocks.length);
// Corrupt the block's metadata so fsck reports a failure.
blocks[0].setNumBytes(-1L);
String outStr=runFsck(conf,-1,true,fileName);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
fs.delete(filePath,true);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
* Test fsck with permission set on inodes
*/
@Test public void testFsckPermission() throws Exception {
final DFSTestUtil util=new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(20).build();
final Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
final MiniDFSCluster c2=cluster;
final String dir="/dfsck";
final Path dirpath=new Path(dir);
final FileSystem fs=c2.getFileSystem();
util.createFiles(fs,dir);
util.waitReplication(fs,dir,(short)3);
// Restrict the directory to its owner so a foreign user is denied.
fs.setPermission(dirpath,new FsPermission((short)0700));
UserGroupInformation fakeUGI=UserGroupInformation.createUserForTesting("ProbablyNotARealUserName",new String[]{"ShangriLa"});
// With mode 0700, fsck run as the fake user must fail (exit code -1).
// Parameterized PrivilegedExceptionAction<Object> replaces the raw type.
fakeUGI.doAs(new PrivilegedExceptionAction<Object>(){
@Override public Object run() throws Exception {
System.out.println(runFsck(conf,-1,true,dir));
return null;
}
}
);
// Open the directory up; the same user should now get a healthy report.
fs.setPermission(dirpath,new FsPermission((short)0777));
fakeUGI.doAs(new PrivilegedExceptionAction<Object>(){
@Override public Object run() throws Exception {
final String outStr=runFsck(conf,0,true,dir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
return null;
}
}
);
util.cleanup(fs,dir);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Tests that the # of missing block replicas and expected replicas is correct
* @throws IOException
*/
@Test public void testFsckMissingReplicas() throws IOException {
// Ask for 2 replicas but run only 1 datanode, so every block is
// under-replicated by exactly one replica.
final short REPL_FACTOR=2;
final short NUM_REPLICAS=1;
final short NUM_BLOCKS=3;
final long blockSize=512;
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize);
MiniDFSCluster cluster=null;
DistributedFileSystem dfs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
assertNotNull("Failed Cluster Creation",cluster);
cluster.waitClusterUp();
dfs=cluster.getFileSystem();
assertNotNull("Failed to get FileSystem",dfs);
// No need for new String(...); a literal suffices.
final String pathString="/testfile";
final Path path=new Path(pathString);
long fileLen=blockSize * NUM_BLOCKS;
DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
NameNode namenode=cluster.getNameNode();
NetworkTopology nettop=cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
Map pmap=new HashMap();
Writer result=new StringWriter();
PrintWriter out=new PrintWriter(result,true);
InetAddress remoteAddress=InetAddress.getLocalHost();
NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_REPLICAS,(short)1,remoteAddress);
final HdfsFileStatus file=namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result res=new Result(conf);
fsck.check(pathString,file,res);
System.out.println(result.toString());
// assertEquals takes (expected, actual); the original had them swapped.
assertEquals((NUM_BLOCKS * REPL_FACTOR) - (NUM_BLOCKS * NUM_REPLICAS),res.missingReplicas);
assertEquals(NUM_BLOCKS * REPL_FACTOR,res.numExpectedReplicas);
}
finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* do fsck
*/
// End-to-end fsck test: a healthy cluster reports HEALTHY (and fsck must not
// disturb file access times), while the same namespace restarted with no
// datanodes reports CORRUPT.
@Test public void testFsck() throws Exception {
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
// Precision of 1ms so any accidental access-time update would be visible.
final long precision=1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs=cluster.getFileSystem();
final String fileName="/srcdat";
util.createFiles(fs,fileName);
util.waitReplication(fs,fileName,(short)3);
final Path file=new Path(fileName);
long aTime=fs.getFileStatus(file).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr=runFsck(conf,0,true,"/");
verifyAuditLogs();
// fsck is a read-only scan and must not change access times.
assertEquals(aTime,fs.getFileStatus(file).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
// Restart with zero datanodes: all blocks are missing, so fsck (exit 1)
// must report CORRUPT.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
outStr=runFsck(conf,1,true,"/");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
System.out.println(outStr);
// Bring datanodes back so cleanup can delete the test data.
cluster.startDataNodes(conf,4,true,null,null);
cluster.waitActive();
cluster.waitClusterUp();
fs=cluster.getFileSystem();
util.cleanup(fs,"/srcdat");
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Corrupts a block on disk, forces the corruption to be detected by reading
// the file, waits for the namenode to register the corrupt replica, then
// verifies fsck reports CORRUPT and names the affected file.
@Test public void testCorruptBlock() throws Exception {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
FileSystem fs=null;
DFSClient dfsClient=null;
LocatedBlocks blocks=null;
int replicaCount=0;
Random random=new Random();
String outStr=null;
// Single replica so corrupting one copy corrupts the block entirely.
short factor=1;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs=cluster.getFileSystem();
Path file1=new Path("/testCorruptBlock");
DFSTestUtil.createFile(fs,file1,1024,factor,0);
DFSTestUtil.waitReplication(fs,file1,factor);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,file1);
// Baseline: the cluster is healthy before corruption.
outStr=runFsck(conf,0,true,"/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Overwrite bytes at a random offset in the on-disk block file.
File blockFile=MiniDFSCluster.getBlockFile(0,block);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile=new RandomAccessFile(blockFile,"rw");
FileChannel channel=raFile.getChannel();
String badString="BADBAD";
int rand=random.nextInt((int)channel.size() / 2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
}
// Read the file to trigger checksum verification; the IOException from
// the corrupt replica is expected and deliberately ignored.
try {
IOUtils.copyBytes(fs.open(file1),new IOUtils.NullOutputStream(),conf,true);
}
catch ( IOException ie) {
}
dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
replicaCount=blocks.get(0).getLocations().length;
// Poll until the namenode reflects the expected replica count again.
while (replicaCount != factor) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
blocks=dfsClient.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE);
replicaCount=blocks.get(0).getLocations().length;
}
assertTrue(blocks.get(0).isCorrupt());
// fsck (exit 1) must now report CORRUPT and name the file.
outStr=runFsck(conf,1,true,"/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
assertTrue(outStr.contains("testCorruptBlock"));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Tests that the # of misreplaced replicas is correct
* @throws IOException
*/
@Test public void testFsckMisPlacedReplicas() throws IOException {
// Both datanodes share /rack1, so no block can satisfy the
// one-replica-per-rack placement policy once a second rack exists.
final short REPL_FACTOR=2;
short NUM_DN=2;
final short NUM_BLOCKS=3;
final long blockSize=512;
String[] racks={"/rack1","/rack1"};
String[] hosts={"host1","host2"};
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,blockSize);
MiniDFSCluster cluster=null;
DistributedFileSystem dfs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts).racks(racks).build();
assertNotNull("Failed Cluster Creation",cluster);
cluster.waitClusterUp();
dfs=cluster.getFileSystem();
assertNotNull("Failed to get FileSystem",dfs);
// No need for new String(...); a literal suffices.
final String pathString="/testfile";
final Path path=new Path(pathString);
long fileLen=blockSize * NUM_BLOCKS;
DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
NameNode namenode=cluster.getNameNode();
NetworkTopology nettop=cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
// Add a node on a second rack so cross-rack placement becomes expected.
nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2","/host3"));
NUM_DN++;
Map pmap=new HashMap();
Writer result=new StringWriter();
PrintWriter out=new PrintWriter(result,true);
InetAddress remoteAddress=InetAddress.getLocalHost();
NamenodeFsck fsck=new NamenodeFsck(conf,namenode,nettop,pmap,out,NUM_DN,REPL_FACTOR,remoteAddress);
final HdfsFileStatus file=namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result res=new Result(conf);
fsck.check(pathString,file,res);
// assertEquals takes (expected, actual); the original had them swapped.
// Every block is mis-replicated since both replicas sit on one rack.
assertEquals(NUM_BLOCKS,res.numMisReplicatedBlocks);
}
finally {
if (dfs != null) {
dfs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
// Verifies that 'fsck -move' retains a corrupt file (it is copied to
// /lost+found, not removed) while 'fsck -move -delete' removes it and
// restores the namespace to HEALTHY.
@Test public void testFsckMoveAndDelete() throws Exception {
final int MAX_MOVE_TRIES=5;
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsckMoveAndDelete").setNumFiles(5).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir="/srcdat";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
String[] fileNames=util.getFileNames(topDir);
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
// Corrupt the first file by deleting its first block from every datanode.
String corruptFileName=fileNames[0];
ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(corruptFileName,0,Long.MAX_VALUE).get(0).getBlock();
for (int i=0; i < 4; i++) {
File blockFile=MiniDFSCluster.getBlockFile(i,block);
if (blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete());
}
}
// Poll until the directory scanner notices the missing block and fsck
// reports CORRUPT.
outStr=runFsck(conf,1,false,"/");
while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
outStr=runFsck(conf,1,false,"/");
}
// 'fsck -move' must never delete the corrupt file; verify it is still
// present after several invocations.
for (int i=0; i < MAX_MOVE_TRIES; i++) {
outStr=runFsck(conf,1,true,"/","-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
String[] newFileNames=util.getFileNames(topDir);
boolean found=false;
for ( String f : newFileNames) {
if (f.equals(corruptFileName)) {
found=true;
break;
}
}
assertTrue(found);
}
// With -delete the corrupt file is removed, after which fsck is HEALTHY.
outStr=runFsck(conf,1,true,"/","-move","-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs,topDir);
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier
// Corrupts several files in different block positions, waits for fsck to
// report the exact number of corrupt blocks, then checks that '-move'
// salvages the readable remains to /lost+found and '-delete' restores
// the namespace to HEALTHY.
@Test public void testFsckMove() throws Exception {
Configuration conf=new HdfsConfiguration();
final int DFS_BLOCK_SIZE=1024;
final int NUM_DATANODES=4;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFS_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
// Files span multiple blocks so corruption can hit interior blocks.
DFSTestUtil util=new DFSTestUtil("TestFsck",5,3,(5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1),5 * DFS_BLOCK_SIZE);
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
String topDir="/srcdat";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf);
String fileNames[]=util.getFileNames(topDir);
// Each CorruptedTestFile removes a different set of block indices,
// covering first, middle, last, and multi-block corruption patterns.
CorruptedTestFile ctFiles[]=new CorruptedTestFile[]{new CorruptedTestFile(fileNames[0],Sets.newHashSet(0),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[1],Sets.newHashSet(2,3),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[2],Sets.newHashSet(4),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[3],Sets.newHashSet(0,1,2,3),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE),new CorruptedTestFile(fileNames[4],Sets.newHashSet(1,2,3,4),dfsClient,NUM_DATANODES,DFS_BLOCK_SIZE)};
int totalMissingBlocks=0;
for ( CorruptedTestFile ctFile : ctFiles) {
totalMissingBlocks+=ctFile.getTotalMissingBlocks();
}
for ( CorruptedTestFile ctFile : ctFiles) {
ctFile.removeBlocks();
}
// Poll fsck output until the reported corrupt-block count matches the
// number of blocks we removed.
while (true) {
outStr=runFsck(conf,1,false,"/");
String numCorrupt=null;
for ( String line : outStr.split(LINE_SEPARATOR)) {
Matcher m=numCorruptBlocksPattern.matcher(line);
if (m.matches()) {
numCorrupt=m.group(1);
break;
}
}
if (numCorrupt == null) {
throw new IOException("failed to find number of corrupt " + "blocks in fsck output.");
}
if (numCorrupt.equals(Integer.toString(totalMissingBlocks))) {
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
break;
}
try {
Thread.sleep(100);
}
catch ( InterruptedException ignore) {
}
}
// '-move' salvages what is readable; each file then checks its remains.
outStr=runFsck(conf,1,false,"/","-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
for ( CorruptedTestFile ctFile : ctFiles) {
ctFile.checkSalvagedRemains();
}
// '-delete' removes the corrupt files, leaving a healthy namespace.
outStr=runFsck(conf,1,true,"/","-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs,topDir);
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Running fsck against a path that does not exist must not produce a
// HEALTHY report.
@Test public void testFsckNonExistent() throws Exception {
DFSTestUtil testUtil=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build();
MiniDFSCluster miniCluster=null;
FileSystem hdfs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
miniCluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
hdfs=miniCluster.getFileSystem();
testUtil.createFiles(hdfs,"/srcdat");
testUtil.waitReplication(hdfs,"/srcdat",(short)3);
// fsck on a missing path exits 0 but must not claim the path is healthy.
String fsckOutput=runFsck(conf,0,true,"/non-existent");
assertEquals(-1,fsckOutput.indexOf(NamenodeFsck.HEALTHY_STATUS));
System.out.println(fsckOutput);
testUtil.cleanup(hdfs,"/srcdat");
}
finally {
if (hdfs != null) {
try {
hdfs.close();
}
catch ( Exception e) {
}
}
if (miniCluster != null) {
miniCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test fsck with symlinks in the filesystem
*/
@Test public void testFsckSymlink() throws Exception {
final DFSTestUtil util=new DFSTestUtil.Builder().setName(getClass().getSimpleName()).setNumFiles(1).build();
final Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// 1ms access-time precision so any accidental atime update is visible.
final long precision=1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision);
// Note: the block-report interval was already set above; the duplicate
// setLong that the original repeated here has been removed.
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs=cluster.getFileSystem();
final String fileName="/srcdat";
util.createFiles(fs,fileName);
final FileContext fc=FileContext.getFileContext(cluster.getConfiguration(0));
final Path file=new Path(fileName);
final Path symlink=new Path("/srcdat-symlink");
fc.createSymlink(file,symlink,false);
util.waitReplication(fs,fileName,(short)3);
long aTime=fc.getFileStatus(symlink).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr=runFsck(conf,0,true,"/");
verifyAuditLogs();
// fsck must not change the symlink's access time.
assertEquals(aTime,fc.getFileStatus(symlink).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Exactly one symlink exists and fsck should count it.
assertTrue(outStr.contains("Total symlinks:\t\t1"));
util.cleanup(fs,fileName);
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
* check if option -list-corruptfiles of fsck command works properly
*/
@Test public void testFsckListCorruptFilesBlocks() throws Exception {
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
FileSystem fs=null;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil util=new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs,"/corruptData",(short)1);
util.waitReplication(fs,"/corruptData",(short)1);
// Before any corruption, the listing must report zero corrupt files.
String outStr=runFsck(conf,0,false,"/corruptData","-list-corruptfileblocks");
System.out.println("1. good fsck out: " + outStr);
assertTrue(outStr.contains("has 0 CORRUPT files"));
// Delete every block and metadata file from all storage directories so
// all three files become corrupt.
final String bpid=cluster.getNamesystem().getBlockPoolId();
for (int i=0; i < 4; i++) {
for (int j=0; j <= 1; j++) {
File storageDir=cluster.getInstanceStorageDir(i,j);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
// Parameterized List<File>: the raw List the original used does not
// compile against the typed enhanced-for below.
List<File> metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
assertTrue("Cannot remove file.",blockFile.delete());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
}
// Poll until the namenode has registered at least one corrupt file.
final NamenodeProtocols namenode=cluster.getNameNodeRpc();
CorruptFileBlocks corruptFileBlocks=namenode.listCorruptFileBlocks("/corruptData",null);
int numCorrupt=corruptFileBlocks.getFiles().length;
while (numCorrupt == 0) {
Thread.sleep(1000);
corruptFileBlocks=namenode.listCorruptFileBlocks("/corruptData",null);
numCorrupt=corruptFileBlocks.getFiles().length;
}
outStr=runFsck(conf,-1,true,"/corruptData","-list-corruptfileblocks");
System.out.println("2. bad fsck out: " + outStr);
assertTrue(outStr.contains("has 3 CORRUPT files"));
// A freshly created directory must still report zero corrupt files.
util.createFiles(fs,"/goodData");
outStr=runFsck(conf,0,true,"/goodData","-list-corruptfileblocks");
System.out.println("3. good fsck out: " + outStr);
assertTrue(outStr.contains("has 0 CORRUPT files"));
util.cleanup(fs,"/corruptData");
util.cleanup(fs,"/goodData");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies fsck's handling of files open for write: they are hidden by
// default, shown with '-openforwrite', and reported normally once closed.
@Test public void testFsckOpenFiles() throws Exception {
DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(4).build();
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
Configuration conf=new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir="/srcdat";
String randomString="HADOOP ";
fs=cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs,topDir);
util.waitReplication(fs,topDir,(short)3);
String outStr=runFsck(conf,0,true,"/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Open a file and write to it WITHOUT closing, so it stays open for write.
Path openFile=new Path(topDir + "/openFile");
FSDataOutputStream out=fs.create(openFile);
int writeCount=0;
while (writeCount != 100) {
out.write(randomString.getBytes());
writeCount++;
}
// Default fsck hides open files: healthy, no OPENFORWRITE marker.
outStr=runFsck(conf,0,true,topDir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertFalse(outStr.contains("OPENFORWRITE"));
// With -openforwrite the open file must be listed by name.
outStr=runFsck(conf,0,true,topDir,"-openforwrite");
System.out.println(outStr);
assertTrue(outStr.contains("OPENFORWRITE"));
assertTrue(outStr.contains("openFile"));
// After closing, the file is no longer flagged as open for write.
out.close();
outStr=runFsck(conf,0,true,topDir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertFalse(outStr.contains("OPENFORWRITE"));
util.cleanup(fs,topDir);
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
cluster.shutdown();
}
finally {
if (fs != null) {
try {
fs.close();
}
catch ( Exception e) {
}
}
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Test that a dummy implementation of JournalManager can
* be initialized on startup
*/
@Test public void testDummyJournalManager() throws Exception {
MiniDFSCluster cluster=null;
Configuration conf=new Configuration();
// Register the dummy plugin for the "dummy" URI scheme and point the
// edits dir at it; zero checked volumes so the fake dir is tolerated.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",DummyJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,DUMMY_URI);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
// Startup must have driven the plugin through its format lifecycle
// and handed it the configuration, URI, and namespace info.
assertTrue(DummyJournalManager.shouldPromptCalled);
assertTrue(DummyJournalManager.formatCalled);
assertNotNull(DummyJournalManager.conf);
assertEquals(new URI(DUMMY_URI),DummyJournalManager.uri);
assertNotNull(DummyJournalManager.nsInfo);
// assertEquals takes (expected, actual): the cluster's cluster ID is
// the reference value the plugin's captured nsInfo must match.
assertEquals(cluster.getNameNode().getNamesystem().getClusterId(),DummyJournalManager.nsInfo.getClusterID());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
// Verifies ImageServlet.isValidRequestor: the other namenode's Kerberos
// principal is always allowed, ACL-allowed users are allowed, and everyone
// else is rejected.
@Test public void testIsValidRequestor() throws IOException {
Configuration conf=new HdfsConfiguration();
// Map principals to short names by dropping realm/host components.
KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
// Configure an HA pair (nn1/nn2) so nn2's principal counts as a peer NN.
conf.set(DFSConfigKeys.DFS_NAMESERVICES,"ns1");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,"ns1"),"nn1,nn2");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn1"),"host1:1234");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,"ns1","nn1"),"hdfs/_HOST@TEST-REALM.COM");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,"ns1","nn2"),"host2:1234");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,"ns1","nn2"),"hdfs/_HOST@TEST-REALM.COM");
NameNode.initializeGenericKeys(conf,"ns1","nn1");
// ACL initially denies everyone.
AccessControlList acls=Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false);
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// The peer namenode's principal is valid even though the ACL denies it.
assertTrue(ImageServlet.isValidRequestor(context,"hdfs/host2@TEST-REALM.COM",conf));
// Now allow exactly the short name "atm" through the ACL.
Mockito.when(acls.isUserAllowed(Mockito.argThat(new ArgumentMatcher(){
@Override public boolean matches( Object argument){
return ((UserGroupInformation)argument).getShortUserName().equals("atm");
}
}
))).thenReturn(true);
// Peer NN still valid; ACL-allowed user valid; arbitrary user rejected.
assertTrue(ImageServlet.isValidRequestor(context,"hdfs/host2@TEST-REALM.COM",conf));
assertTrue(ImageServlet.isValidRequestor(context,"atm@TEST-REALM.COM",conf));
assertFalse(ImageServlet.isValidRequestor(context,"todd@TEST-REALM.COM",conf));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that the concat operation is properly persisted in the
 * edit log, and properly replayed on restart.
 */
@Test public void testConcatInEditLog() throws Exception {
  final Path testDir = new Path("/testConcatInEditLog");
  final long srcLen = blockSize;
  // Lay down three equally sized source files under the test directory.
  final Path[] sources = new Path[3];
  for (int n = 0; n < sources.length; n++) {
    sources[n] = new Path(testDir, "src-" + n);
    DFSTestUtil.createFile(dfs, sources[n], srcLen, REPL_FACTOR, 1);
  }
  // Target file that the sources are folded into.
  final Path target = new Path(testDir, "target");
  DFSTestUtil.createFile(dfs, target, srcLen, REPL_FACTOR, 1);
  dfs.concat(target, sources);
  assertTrue(dfs.exists(target));
  final FileStatus before = dfs.getFileStatus(target);
  // Restarting the NameNode forces the edit log to be replayed.
  cluster.restartNameNode(true);
  // Target survives the replay, sources are gone, mtime is preserved.
  assertTrue(dfs.exists(target));
  assertFalse(dfs.exists(sources[0]));
  final FileStatus after = dfs.getFileStatus(target);
  assertEquals(before.getModificationTime(), after.getModificationTime());
}
IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Concatenates 10 files into one
* Verifies the final size, deletion of the file, number of blocks
* @throws IOException
*/
@Test public void testConcat() throws IOException, InterruptedException {
final int numFiles=10;
long fileLen=blockSize * 3;
HdfsFileStatus fStatus;
FSDataInputStream stm;
String trg=new String("/trg");
Path trgPath=new Path(trg);
DFSTestUtil.createFile(dfs,trgPath,fileLen,REPL_FACTOR,1);
fStatus=nn.getFileInfo(trg);
long trgLen=fStatus.getLen();
long trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
Path[] files=new Path[numFiles];
byte[][] bytes=new byte[numFiles][(int)fileLen];
LocatedBlocks[] lblocks=new LocatedBlocks[numFiles];
long[] lens=new long[numFiles];
int i=0;
for (i=0; i < files.length; i++) {
files[i]=new Path("/file" + i);
Path path=files[i];
System.out.println("Creating file " + path);
DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1);
fStatus=nn.getFileInfo(path.toUri().getPath());
lens[i]=fStatus.getLen();
assertEquals(trgLen,lens[i]);
lblocks[i]=nn.getBlockLocations(path.toUri().getPath(),0,lens[i]);
stm=dfs.open(path);
stm.readFully(0,bytes[i]);
stm.close();
}
final UserGroupInformation user1=UserGroupInformation.createUserForTesting("theDoctor",new String[]{"tardis"});
DistributedFileSystem hdfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1,conf);
try {
hdfs.concat(trgPath,files);
fail("Permission exception expected");
}
catch ( IOException ie) {
System.out.println("Got expected exception for permissions:" + ie.getLocalizedMessage());
}
ContentSummary cBefore=dfs.getContentSummary(trgPath.getParent());
dfs.concat(trgPath,files);
ContentSummary cAfter=dfs.getContentSummary(trgPath.getParent());
assertEquals(cBefore.getFileCount(),cAfter.getFileCount() + files.length);
long totalLen=trgLen;
long totalBlocks=trgBlocks;
for (i=0; i < files.length; i++) {
totalLen+=lens[i];
totalBlocks+=lblocks[i].locatedBlockCount();
}
System.out.println("total len=" + totalLen + "; totalBlocks="+ totalBlocks);
fStatus=nn.getFileInfo(trg);
trgLen=fStatus.getLen();
stm=dfs.open(trgPath);
byte[] byteFileConcat=new byte[(int)trgLen];
stm.readFully(0,byteFileConcat);
stm.close();
trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
assertEquals(trgBlocks,totalBlocks);
assertEquals(trgLen,totalLen);
for ( Path p : files) {
fStatus=nn.getFileInfo(p.toUri().getPath());
assertNull("File " + p + " still exists",fStatus);
DFSTestUtil.createFile(dfs,p,fileLen,REPL_FACTOR,1);
}
checkFileContent(byteFileConcat,bytes);
Path smallFile=new Path("/sfile");
int sFileLen=10;
DFSTestUtil.createFile(dfs,smallFile,sFileLen,REPL_FACTOR,1);
dfs.concat(trgPath,new Path[]{smallFile});
fStatus=nn.getFileInfo(trg);
trgLen=fStatus.getLen();
trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount();
assertEquals(trgBlocks,totalBlocks + 1);
assertEquals(trgLen,totalLen + sFileLen);
}
TestInitializer InternalCallVerifier NullVerifier HybridVerifier
// Spins up a fresh MiniDFSCluster before each test and caches the
// FileSystem and NameNode RPC handles used by the test methods.
@Before public void startUpCluster() throws IOException {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
assertNotNull("Failed Cluster Creation",cluster);
cluster.waitClusterUp();
dfs=cluster.getFileSystem();
assertNotNull("Failed to get FileSystem",dfs);
nn=cluster.getNameNodeRpc();
assertNotNull("Failed to get NameNode",nn);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Concatenates a source whose last block is not full (srcFileLen is not a
// multiple of blockSize) into a block-aligned target, then verifies length,
// block count, source deletion and byte content of the result.
@Test public void testConcatNotCompleteBlock() throws IOException {
long trgFileLen=blockSize * 3;
// 20 extra bytes make the source's final block a partial block.
long srcFileLen=blockSize * 3 + 20;
String name1="/trg", name2="/src";
// Create the target and capture its bytes and block locations.
Path filePath1=new Path(name1);
DFSTestUtil.createFile(dfs,filePath1,trgFileLen,REPL_FACTOR,1);
HdfsFileStatus fStatus=nn.getFileInfo(name1);
long fileLen=fStatus.getLen();
assertEquals(fileLen,trgFileLen);
FSDataInputStream stm=dfs.open(filePath1);
byte[] byteFile1=new byte[(int)trgFileLen];
stm.readFully(0,byteFile1);
stm.close();
LocatedBlocks lb1=nn.getBlockLocations(name1,0,trgFileLen);
// Create the source the same way.
Path filePath2=new Path(name2);
DFSTestUtil.createFile(dfs,filePath2,srcFileLen,REPL_FACTOR,1);
fStatus=nn.getFileInfo(name2);
fileLen=fStatus.getLen();
assertEquals(srcFileLen,fileLen);
stm=dfs.open(filePath2);
byte[] byteFile2=new byte[(int)srcFileLen];
stm.readFully(0,byteFile2);
stm.close();
LocatedBlocks lb2=nn.getBlockLocations(name2,0,srcFileLen);
System.out.println("trg len=" + trgFileLen + "; src len="+ srcFileLen);
// Perform the concat and verify the merged file.
dfs.concat(filePath1,new Path[]{filePath2});
long totalLen=trgFileLen + srcFileLen;
fStatus=nn.getFileInfo(name1);
fileLen=fStatus.getLen();
stm=dfs.open(filePath1);
byte[] byteFileConcat=new byte[(int)fileLen];
stm.readFully(0,byteFileConcat);
stm.close();
LocatedBlocks lbConcat=nn.getBlockLocations(name1,0,fileLen);
// Block count is the sum of both files' blocks (partial block included).
assertEquals(lbConcat.locatedBlockCount(),lb1.locatedBlockCount() + lb2.locatedBlockCount());
System.out.println("file1 len=" + fileLen + "; total len="+ totalLen);
assertEquals(fileLen,totalLen);
// The source must no longer exist, and bytes must be target||source.
fStatus=nn.getFileInfo(name2);
assertNull("File " + name2 + "still exists",fStatus);
checkFileContent(byteFileConcat,new byte[][]{byteFile1,byteFile2});
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * test illegal args cases
 */
@Test public void testIllegalArg() throws IOException {
  final long len = blockSize * 3;
  final Path parentDir = new Path("/parentTrg");
  assertTrue(dfs.mkdirs(parentDir));
  final Path target = new Path(parentDir, "trg");
  DFSTestUtil.createFile(dfs, target, len, REPL_FACTOR, 1);
  // Case 1: source lives in a different directory than the target.
  {
    final Path otherDir = new Path("/dir1");
    assertTrue(dfs.mkdirs(otherDir));
    final Path src = new Path(otherDir, "src");
    DFSTestUtil.createFile(dfs, src, len, REPL_FACTOR, 1);
    try {
      dfs.concat(target, new Path[]{src});
      fail("didn't fail for src and trg in different directories");
    } catch (Exception expected) {
    }
  }
  // Case 2: source path is relative, not absolute.
  try {
    dfs.concat(target, new Path[]{new Path("test1/a")});
    fail("didn't fail with invalid arguments");
  } catch (Exception expected) {
  }
  // Case 3: empty source array.
  try {
    dfs.concat(target, new Path[]{});
    fail("didn't fail with invalid arguments");
  } catch (Exception expected) {
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Decommissions a DataNode via the exclude file and verifies that the
// NameNodeInfo MXBean's LiveNodes attribute reports it as "Decommissioned".
@Test public void testHostsExcludeInUI() throws Exception {
Configuration conf=getConf();
short REPLICATION_FACTOR=2;
final Path filePath=new Path("/testFile");
// Set up empty include/exclude host files on the local filesystem.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,"");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
String racks[]={"/rack1","/rack1","/rack2","/rack2"};
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
try {
final FileSystem fs=cluster.getFileSystem();
DFSTestUtil.createFile(fs,filePath,1L,REPLICATION_FACTOR,1L);
ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// Pick the first replica's DataNode as the one to decommission.
BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(filePath),0,Long.MAX_VALUE);
String name=locs[0].getNames()[0];
String names=name + "\n" + "localhost:42\n";
// NOTE(review): the log mentions `names` (which adds "localhost:42"), but
// only `name` is actually written to the exclude file — confirm intended.
LOG.info("adding '" + names + "' to exclude file "+ excludeFile.toUri().getPath());
DFSTestUtil.writeFile(localFileSys,excludeFile,name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs,name);
DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0);
// The MXBean must now report the node as decommissioned.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
String nodes=(String)mbs.getAttribute(mxbeanName,"LiveNodes");
assertTrue("Live nodes should contain the decommissioned node",nodes.contains("Decommissioned"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Lists two hosts in the include file but starts zero DataNodes: both
 * included hosts must be counted as dead by the FSNamesystem and by the
 * FSNamesystemState MXBean.
 */
@Test public void testHostsIncludeForDeadCount() throws Exception {
Configuration conf=getConf();
// Write an include file naming two hosts that will never register.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/decommission");
Path excludeFile=new Path(dir,"exclude");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts=new StringBuilder();
includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777").append("\n");
DFSTestUtil.writeFile(localFileSys,excludeFile,"");
DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FSNamesystem ns=cluster.getNameNode().getNamesystem();
// assertEquals gives a useful failure message (was assertTrue(x == n)).
assertEquals(2,ns.getNumDeadDataNodes());
assertEquals(0,ns.getNumLiveDataNodes());
// The FSNamesystemState MXBean must agree with the namesystem.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
assertEquals(2,((Integer)mbs.getAttribute(mxbeanName,"NumDeadDataNodes")).intValue());
assertEquals(0,((Integer)mbs.getAttribute(mxbeanName,"NumLiveDataNodes")).intValue());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercises getListing(): full listing, resuming after a regular startAfter
 * token, resuming after a /.reserved/.inodes startAfter token, and the
 * exception raised when the startAfter file has been deleted.
 */
@Test public void testFilesInGetListingOps() throws Exception {
final Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem hdfs=cluster.getFileSystem();
final FSDirectory fsdir=cluster.getNamesystem().getFSDirectory();
hdfs.mkdirs(new Path("/tmp"));
DFSTestUtil.createFile(hdfs,new Path("/tmp/f1"),0,(short)1,0);
DFSTestUtil.createFile(hdfs,new Path("/tmp/f2"),0,(short)1,0);
DFSTestUtil.createFile(hdfs,new Path("/tmp/f3"),0,(short)1,0);
// No startAfter token: all three files are returned.
DirectoryListing dl=cluster.getNameNodeRpc().getListing("/tmp",HdfsFileStatus.EMPTY_NAME,false);
assertEquals(3,dl.getPartialListing().length);
// Resume after "f2" by name; only "f3" remains.
// (was: new String("f2") — a needless String copy)
// NOTE(review): getBytes() uses the platform default charset — confirm
// against how the NameNode decodes startAfter tokens.
String f2="f2";
dl=cluster.getNameNodeRpc().getListing("/tmp",f2.getBytes(),false);
assertEquals(1,dl.getPartialListing().length);
// Resume after the same file addressed via its /.reserved/.inodes path.
INode f2INode=fsdir.getINode("/tmp/f2");
String f2InodePath="/.reserved/.inodes/" + f2INode.getId();
dl=cluster.getNameNodeRpc().getListing("/tmp",f2InodePath.getBytes(),false);
assertEquals(1,dl.getPartialListing().length);
// After deleting f2 the inode-path token must be rejected.
hdfs.delete(new Path("/tmp/f2"),false);
try {
dl=cluster.getNameNodeRpc().getListing("/tmp",f2InodePath.getBytes(),false);
fail("Didn't get exception for the deleted startAfter token.");
}
catch ( IOException e) {
assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
}
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies the under-construction state transitions of an INodeFile and the
// client name/machine recorded by FileUnderConstructionFeature.
@Test public void testFileUnderConstruction(){
  replication=3;
  final INodeFile inode=new INodeFile(INodeId.GRANDFATHER_INODE_ID,null,perm,0L,0L,null,replication,1024L);
  // A freshly built file starts out complete.
  assertFalse(inode.isUnderConstruction());
  // Flip it into the under-construction state and check the feature.
  final String owner="client";
  final String host="machine";
  inode.toUnderConstruction(owner,host);
  assertTrue(inode.isUnderConstruction());
  final FileUnderConstructionFeature feature=inode.getFileUnderConstructionFeature();
  assertEquals(owner,feature.getClientName());
  assertEquals(host,feature.getClientMachine());
  // Completing the file clears the under-construction state again.
  inode.toCompleteFile(Time.now());
  assertFalse(inode.isUnderConstruction());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests for addressing files using /.reserved/.inodes/ in file system
 * operations.
 */
@Test public void testInodeIdBasedPaths() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
NamenodeProtocols nnRpc=cluster.getNameNodeRpc();
// Create a base dir addressed by the root inode id, plus its regular-path
// twin for later comparisons.
Path baseDir=getInodePath(INodeId.ROOT_INODE_ID,"testInodeIdBasedPaths");
Path baseDirRegPath=new Path("/testInodeIdBasedPaths");
fs.mkdirs(baseDir);
fs.exists(baseDir);
long baseDirFileId=nnRpc.getFileInfo(baseDir.toString()).getFileId();
Path testFileInodePath=getInodePath(baseDirFileId,"test1");
Path testFileRegularPath=new Path(baseDir,"test1");
final int testFileBlockSize=1024;
FileSystemTestHelper.createFile(fs,testFileInodePath,1,testFileBlockSize);
assertTrue(fs.exists(testFileInodePath));
// Metadata mutations through the inode path: permission, owner, times,
// replication, preferred block size.
FsPermission perm=new FsPermission((short)0666);
fs.setPermission(testFileInodePath,perm);
FileStatus fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(perm,fileStatus.getPermission());
fs.setOwner(testFileInodePath,fileStatus.getOwner(),fileStatus.getGroup());
fs.setTimes(testFileInodePath,0,0);
fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(0,fileStatus.getModificationTime());
assertEquals(0,fileStatus.getAccessTime());
fs.setReplication(testFileInodePath,(short)3);
fileStatus=fs.getFileStatus(testFileInodePath);
assertEquals(3,fileStatus.getReplication());
fs.setReplication(testFileInodePath,(short)1);
assertEquals(testFileBlockSize,nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
// Read-only operations that must simply not throw on an inode path.
{
fs.isFileClosed(testFileInodePath);
fs.getAclStatus(testFileInodePath);
fs.getXAttrs(testFileInodePath);
fs.listXAttrs(testFileInodePath);
fs.access(testFileInodePath,FsAction.READ_WRITE);
}
// Symlink creation with invalid and valid targets under the inode path.
String invalidTarget=new Path(baseDir,"invalidTarget").toString();
String link=new Path(baseDir,"link").toString();
testInvalidSymlinkTarget(nnRpc,invalidTarget,link);
String validTarget="/validtarget";
testValidSymlinkTarget(nnRpc,validTarget,link);
fs.append(testFileInodePath);
fs.recoverLease(testFileInodePath);
// Block locations via inode path and regular path must agree.
LocatedBlocks l1=nnRpc.getBlockLocations(testFileInodePath.toString(),0,Long.MAX_VALUE);
LocatedBlocks l2=nnRpc.getBlockLocations(testFileRegularPath.toString(),0,Long.MAX_VALUE);
checkEquals(l1,l2);
// Rename round-trips (plain and OVERWRITE) must preserve the status.
Path renameDst=getInodePath(baseDirFileId,"test2");
fileStatus=fs.getFileStatus(testFileInodePath);
fs.rename(testFileInodePath,renameDst);
fs.rename(renameDst,testFileInodePath);
assertEquals(fileStatus,fs.getFileStatus(testFileInodePath));
fs.rename(testFileInodePath,renameDst,Rename.OVERWRITE);
fs.rename(renameDst,testFileInodePath,Rename.OVERWRITE);
assertEquals(fileStatus,fs.getFileStatus(testFileInodePath));
// Content summary and directory listings must match the regular path.
assertEquals(fs.getContentSummary(testFileRegularPath).toString(),fs.getContentSummary(testFileInodePath).toString());
checkEquals(fs.listFiles(baseDirRegPath,false),fs.listFiles(baseDir,false));
// Finally, delete through the inode path.
fs.delete(testFileInodePath,true);
assertFalse(fs.exists(testFileInodePath));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test verifies inode ID counter and inode map functionality.
 */
@Test public void testInodeId() throws IOException {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNamesystem();
long lastId=fsn.getLastInodeId();
// Running totals tracked alongside every namespace mutation below.
int inodeCount=1;
long expectedLastInodeId=INodeId.ROOT_INODE_ID;
assertEquals(fsn.dir.rootDir.getId(),INodeId.ROOT_INODE_ID);
assertEquals(expectedLastInodeId,lastId);
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// mkdir: allocates one new inode id.
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/test1");
assertTrue(fs.mkdirs(path));
assertEquals(++expectedLastInodeId,fsn.getLastInodeId());
assertEquals(++inodeCount,fsn.dir.getInodeMapSize());
// create file: one more inode; its file id equals the latest allocated id.
NamenodeProtocols nnrpc=cluster.getNameNodeRpc();
DFSTestUtil.createFile(fs,new Path("/test1/file"),1024,(short)1,0);
assertEquals(++expectedLastInodeId,fsn.getLastInodeId());
assertEquals(++inodeCount,fsn.dir.getInodeMapSize());
HdfsFileStatus fileStatus=nnrpc.getFileInfo("/test1/file");
assertEquals(expectedLastInodeId,fileStatus.getFileId());
// rename: moves inodes, allocates nothing.
Path renamedPath=new Path("/test2");
assertTrue(fs.rename(path,renamedPath));
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// delete: removes the dir and the file from the inode map.
assertTrue(fs.delete(renamedPath,true));
inodeCount-=2;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Two files under a recreated /test1: dir + 2 files = 3 new inodes.
String file1="/test1/file1";
String file2="/test1/file2";
DFSTestUtil.createFile(fs,new Path(file1),512,(short)1,0);
DFSTestUtil.createFile(fs,new Path(file2),512,(short)1,0);
inodeCount+=3;
expectedLastInodeId+=3;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
// concat: the merged-away source inode is removed; no ids allocated.
nnrpc.concat(file2,new String[]{file1});
inodeCount--;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertTrue(fs.delete(new Path("/test1"),true));
inodeCount-=2;
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Counter and map must survive an edit-log replay on restart.
cluster.restartNameNode();
cluster.waitActive();
fsn=cluster.getNamesystem();
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// createFile with a missing parent: parent dir + file = 2 inodes.
DFSTestUtil.createFile(fs,new Path("/test2/file2"),1024,(short)1,0);
expectedLastInodeId+=2;
inodeCount+=2;
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
FSDataOutputStream outStream=fs.create(new Path("/test3/file"));
assertTrue(outStream != null);
expectedLastInodeId+=2;
inodeCount+=2;
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
// Save the namespace with a file still open, then restart: counters must
// also survive an fsimage load (not just edit-log replay).
fsn.enterSafeMode(false);
fsn.saveNamespace();
fsn.leaveSafeMode();
outStream.close();
cluster.restartNameNode();
cluster.waitActive();
fsn=cluster.getNamesystem();
assertEquals(expectedLastInodeId,fsn.getLastInodeId());
assertEquals(inodeCount,fsn.dir.getInodeMapSize());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Writing to a stream whose file has been deleted must fail on hflush.
 * Restructured so the cluster is shut down even if an early assertion
 * fails, and so the leaked output stream is closed.
 */
@Test(timeout=120000) public void testWriteToDeletedFile() throws IOException {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/test1");
assertTrue(fs.mkdirs(path));
int size=conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,512);
byte[] data=new byte[size];
// Open a stream, then delete the file out from under it.
Path filePath=new Path("/test1/file");
FSDataOutputStream fos=fs.create(filePath);
try {
fs.delete(filePath,false);
try {
fos.write(data,0,data.length);
fos.hflush();
fail("Write should fail after delete");
}
catch ( Exception e) {
// expected: the file no longer exists
}
}
finally {
// Best-effort close; it may throw since the backing file is gone.
try {
fos.close();
}
catch ( IOException ignored) {
}
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Check /.reserved path is reserved and cannot be created.
 */
@Test public void testReservedFileNames() throws IOException {
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// With checking enabled (the default), /.reserved cannot be created.
// NOTE(review): the same call is made twice with identical arguments —
// confirm whether the second was meant to use a different path
// (e.g. "/.reserved/.inodes").
ensureReservedFileNamesCannotBeCreated(fs,"/.reserved",false);
ensureReservedFileNamesCannotBeCreated(fs,"/.reserved",false);
Path reservedPath=new Path("/.reserved");
// Disable the check so a directory named /.reserved can be created,
// then verify the resulting image/edits cannot be loaded normally.
FSDirectory.CHECK_RESERVED_FILE_NAMES=false;
fs.mkdirs(reservedPath);
assertTrue(fs.isDirectory(reservedPath));
ensureReservedFileNamesCannotBeLoaded(cluster);
// NOTE(review): the flag is set to false again here, not restored to
// true — confirm this is intentional before the restart below.
FSDirectory.CHECK_RESERVED_FILE_NAMES=false;
ensureClusterRestartSucceeds(cluster);
// Repeat with /.reserved as a file instead of a directory.
fs.delete(reservedPath,true);
DFSTestUtil.createFile(fs,reservedPath,10,(short)1,0L);
assertTrue(!fs.isDirectory(reservedPath));
ensureReservedFileNamesCannotBeLoaded(cluster);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies ".." resolution inside /.reserved/.inodes paths: a directory's
 * ".." resolves to its parent, and the root's ".." resolves to root itself.
 */
@Test public void testDotdotInodePath() throws Exception {
final Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
DFSClient client=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem hdfs=cluster.getFileSystem();
final FSDirectory fsdir=cluster.getNamesystem().getFSDirectory();
final Path dir=new Path("/dir");
hdfs.mkdirs(dir);
long dirId=fsdir.getINode(dir.toString()).getId();
long parentId=fsdir.getINode("/").getId();
// /.reserved/.inodes/<dirId>/.. must resolve to the parent (root).
String testPath="/.reserved/.inodes/" + dirId + "/..";
client=new DFSClient(NameNode.getAddress(conf),conf);
HdfsFileStatus status=client.getFileInfo(testPath);
// assertEquals gives a useful failure message (was assertTrue(a == b)).
assertEquals(parentId,status.getFileId());
// Root's ".." resolves to root itself.
testPath="/.reserved/.inodes/" + parentId + "/..";
status=client.getFileInfo(testPath);
assertEquals(parentId,status.getFileId());
}
finally {
IOUtils.cleanup(LOG,client);
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that an XAttrFeature can be attached to an INodeFile, read back,
 * and removed again.
 */
@Test public void testXAttrFeature(){
replication=3;
preferredBlockSize=128 * 1024 * 1024;
INodeFile inf=createINodeFile(replication,preferredBlockSize);
// Parameterized builder (was a raw ImmutableList.Builder).
ImmutableList.Builder<XAttr> builder=new ImmutableList.Builder<XAttr>();
XAttr xAttr=new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31,0x32,0x33}).build();
builder.add(xAttr);
XAttrFeature f=new XAttrFeature(builder.build());
inf.addXAttrFeature(f);
XAttrFeature f1=inf.getXAttrFeature();
assertEquals(xAttr,f1.getXAttrs().get(0));
// Removing the feature must leave the inode with no XAttrFeature.
inf.removeXAttrFeature();
f1=inf.getXAttrFeature();
assertNull(f1);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test for the static {@link INodeFile#valueOf(INode,String)}and {@link INodeFileUnderConstruction#valueOf(INode,String)} methods.
 * @throws IOException
 */
@Test public void testValueOf() throws IOException {
final String path="/testValueOf";
final short replication=3;
// Case 1: null inode — both valueOf variants throw FileNotFoundException.
{
final INode from=null;
try {
INodeFile.valueOf(from,path);
fail();
}
catch ( FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().contains("File does not exist"));
}
try {
INodeDirectory.valueOf(from,path);
fail();
}
catch ( FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory does not exist"));
}
}
// Case 2: a plain file — INodeFile.valueOf returns the same instance,
// INodeDirectory.valueOf rejects it.
{
final INode from=createINodeFile(replication,preferredBlockSize);
final INodeFile f=INodeFile.valueOf(from,path);
assertTrue(f == from);
try {
INodeDirectory.valueOf(from,path);
fail();
}
catch ( PathIsNotDirectoryException e) {
}
}
// Case 3: a file under construction — behaves like a plain file.
{
final INode from=new INodeFile(INodeId.GRANDFATHER_INODE_ID,null,perm,0L,0L,null,replication,1024L);
from.asFile().toUnderConstruction("client","machine");
final INodeFile f=INodeFile.valueOf(from,path);
assertTrue(f == from);
try {
INodeDirectory.valueOf(from,path);
fail();
}
catch ( PathIsNotDirectoryException expected) {
}
}
// Case 4: a directory — INodeFile.valueOf rejects it,
// INodeDirectory.valueOf returns the same instance.
{
final INode from=new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,null,perm,0L);
try {
INodeFile.valueOf(from,path);
fail();
}
catch ( FileNotFoundException fnfe) {
assertTrue(fnfe.getMessage().contains("Path is not a file"));
}
final INodeDirectory d=INodeDirectory.valueOf(from,path);
assertTrue(d == from);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies INode#getFullPathName as a file inode is progressively attached
// to a directory and then to the root.
@Test public void testGetFullPathName(){
  replication=3;
  preferredBlockSize=128 * 1024 * 1024;
  final INodeFile leaf=createINodeFile(replication,preferredBlockSize);
  leaf.setLocalName(DFSUtil.string2Bytes("f"));
  final INodeDirectory rootDir=new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,INodeDirectory.ROOT_NAME,perm,0L);
  final INodeDirectory subDir=new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,DFSUtil.string2Bytes("d"),perm,0L);
  // An unattached inode reports just its own local name.
  assertEquals("f",leaf.getFullPathName());
  // Attached to "d": path becomes d/f (no leading separator yet).
  subDir.addChild(leaf);
  assertEquals("d" + Path.SEPARATOR + "f",leaf.getFullPathName());
  // Once "d" hangs off the root, all paths become absolute.
  rootDir.addChild(subDir);
  assertEquals(Path.SEPARATOR + "d" + Path.SEPARATOR+ "f",leaf.getFullPathName());
  assertEquals(Path.SEPARATOR + "d",subDir.getFullPathName());
  assertEquals(Path.SEPARATOR,rootDir.getFullPathName());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that INodeFile#concatBlocks merges the blocks of the appended
 * files into the original file.
 */
@Test public void testConcatBlocks(){
INodeFile origFile=createINodeFiles(1,"origfile")[0];
// Expected value first per the JUnit assertEquals(expected, actual)
// contract (the arguments were previously swapped).
assertEquals("Number of blocks didn't match",1L,origFile.numBlocks());
INodeFile[] appendFiles=createINodeFiles(4,"appendfile");
origFile.concatBlocks(appendFiles);
// 1 original block + 4 appended files' blocks.
assertEquals("Number of blocks didn't match",5L,origFile.numBlocks());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * With DFS_LIST_LIMIT=9, getListing with location info is paginated by the
 * total number of block locations: files with replication 3 come back one
 * per page (3 blocks x 3 replicas = 9 locations), files with replication 1
 * come back three per page.
 */
@Test public void testLocationLimitInListingOps() throws Exception {
final Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT,9);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
final DistributedFileSystem hdfs=cluster.getFileSystem();
// Parameterized list (was a raw ArrayList); tracks file names still
// expected to appear in the listing.
ArrayList<String> source=new ArrayList<String>();
hdfs.mkdirs(new Path("/tmp1"));
hdfs.mkdirs(new Path("/tmp2"));
source.add("f1");
source.add("f2");
int numEntries=source.size();
// Replication 3: each file contributes 9 block locations.
for (int j=0; j < numEntries; j++) {
DFSTestUtil.createFile(hdfs,new Path("/tmp1/" + source.get(j)),4096,3 * 1024 - 100,1024,(short)3,0);
}
byte[] start=HdfsFileStatus.EMPTY_NAME;
for (int j=0; j < numEntries; j++) {
DirectoryListing dl=cluster.getNameNodeRpc().getListing("/tmp1",start,true);
assertEquals(1,dl.getPartialListing().length);
for (int i=0; i < dl.getPartialListing().length; i++) {
source.remove(dl.getPartialListing()[i].getLocalName());
}
start=dl.getLastName();
}
// Every file must have been listed exactly once.
assertTrue(source.isEmpty());
source.add("f1");
source.add("f2");
source.add("f3");
source.add("f4");
source.add("f5");
source.add("f6");
numEntries=source.size();
// Replication 1: each file contributes 3 locations, so 3 files per page.
for (int j=0; j < numEntries; j++) {
DFSTestUtil.createFile(hdfs,new Path("/tmp2/" + source.get(j)),4096,3 * 1024 - 100,1024,(short)1,0);
}
start=HdfsFileStatus.EMPTY_NAME;
for (int j=0; j < numEntries / 3; j++) {
DirectoryListing dl=cluster.getNameNodeRpc().getListing("/tmp2",start,true);
assertEquals(3,dl.getPartialListing().length);
for (int i=0; i < dl.getPartialListing().length; i++) {
source.remove(dl.getPartialListing()[i].getLocalName());
}
start=dl.getLastName();
}
assertTrue(source.isEmpty());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Test whether the inode in inodeMap has been replaced after regular inode
 * replacement
 */
@Test public void testInodeReplacement() throws Exception {
final Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final DistributedFileSystem hdfs=cluster.getFileSystem();
final FSDirectory fsdir=cluster.getNamesystem().getFSDirectory();
final Path dir=new Path("/dir");
hdfs.mkdirs(dir);
// Initially the tree inode and the inode-map entry are one object.
INodeDirectory dirNode=getDir(fsdir,dir);
INode dirNodeFromNode=fsdir.getInode(dirNode.getId());
assertSame(dirNode,dirNodeFromNode);
// Setting a quota replaces the directory inode; the map must track it.
hdfs.setQuota(dir,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
dirNode=getDir(fsdir,dir);
assertTrue(dirNode.isWithQuota());
dirNodeFromNode=fsdir.getInode(dirNode.getId());
assertSame(dirNode,dirNodeFromNode);
// Clearing the quota (-1,-1) replaces the inode again; same invariant.
hdfs.setQuota(dir,-1,-1);
dirNode=getDir(fsdir,dir);
dirNodeFromNode=fsdir.getInode(dirNode.getId());
assertSame(dirNode,dirNodeFromNode);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test public void testGetFullPathNameAfterSetQuota() throws Exception {
long fileLen=1024;
replication=3;
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNamesystem();
FSDirectory fsdir=fsn.getFSDirectory();
DistributedFileSystem dfs=cluster.getFileSystem();
// Create /dir/file and check its full path before any quota change.
final Path dir=new Path("/dir");
final Path file=new Path(dir,"file");
DFSTestUtil.createFile(dfs,file,fileLen,replication,0L);
INode fnode=fsdir.getINode(file.toString());
assertEquals(file.toString(),fnode.getFullPathName());
// Setting a quota replaces the directory inode with a quota-enabled one.
dfs.setQuota(dir,Long.MAX_VALUE - 1,replication * fileLen * 10);
INodeDirectory dirNode=getDir(fsdir,dir);
assertEquals(dir.toString(),dirNode.getFullPathName());
assertTrue(dirNode.isWithQuota());
// After renaming the dir, the child's full path must follow the new
// parent — this is the HDFS-4243 regression check.
final Path newDir=new Path("/newdir");
final Path newFile=new Path(newDir,"file");
dfs.rename(dir,newDir,Options.Rename.OVERWRITE);
fnode=fsdir.getINode(newFile.toString());
assertEquals(newFile.toString(),fnode.getFullPathName());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Builds a cluster, creates a large file population (createFiles), verifies
// the expected total block count, then runs the concurrent delete threads
// (runThreads) against it.
@Test public void largeDelete() throws Throwable {
mc=new MiniDFSCluster.Builder(CONF).build();
try {
mc.waitActive();
Assert.assertNotNull("No Namenode in cluster",mc.getNameNode());
createFiles();
Assert.assertEquals(TOTAL_BLOCKS,getBlockCount());
runThreads();
}
finally {
mc.shutdown();
}
}
InternalCallVerifier NullVerifier
/**
 * Verifies LeaseManager#removeLeaseWithPrefixPath removes all leases under
 * a prefix, with and without a trailing slash. Now shuts the cluster down
 * in a finally block (it was previously leaked).
 */
@Test public void testRemoveLeaseWithPrefixPath() throws Exception {
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
try {
cluster.waitActive();
LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
lm.addLease("holder1","/a/b");
lm.addLease("holder2","/a/c");
assertNotNull(lm.getLeaseByPath("/a/b"));
assertNotNull(lm.getLeaseByPath("/a/c"));
// Prefix without trailing slash removes both leases under /a.
lm.removeLeaseWithPrefixPath("/a");
assertNull(lm.getLeaseByPath("/a/b"));
assertNull(lm.getLeaseByPath("/a/c"));
// Same behavior with a trailing slash.
lm.addLease("holder1","/a/b");
lm.addLease("holder2","/a/c");
lm.removeLeaseWithPrefixPath("/a/");
assertNull(lm.getLeaseByPath("/a/b"));
assertNull(lm.getLeaseByPath("/a/c"));
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check that listCorruptFileBlocks works while the namenode is still in safemode.
 *
 * Flow: start a cluster that does NOT wait for safemode exit, create files,
 * corrupt one block's metadata file on disk, confirm the namenode reports
 * exactly one corrupt file, then restart the namenode (an impossible-to-reach
 * 1.5 safemode threshold keeps it in safemode) and confirm the same corrupt
 * file is still reported from inside safemode.
 */
@Test(timeout=300000) public void testListCorruptFileBlocksInSafeMode() throws Exception {
MiniDFSCluster cluster=null;
Random random=new Random();
try {
Configuration conf=new HdfsConfiguration();
// Aggressive scan/report intervals so the deliberate corruption is
// detected quickly.
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,3 * 1000);
// Threshold > 1.0 can never be satisfied, so the restarted NN stays in
// safemode for the second half of the test.
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,1.5f);
// Populate replication queues immediately (threshold 0).
conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,0f);
conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
cluster=new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
// Leave safemode manually for the first phase of the test.
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false);
FileSystem fs=cluster.getFileSystem();
DFSTestUtil util=new DFSTestUtil.Builder().setName("testListCorruptFileBlocksInSafeMode").setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
util.createFiles(fs,"/srcdat10");
// Baseline: no corrupt files before we tamper with anything.
Collection badFiles=cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/",null);
assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.",badFiles.size() == 0);
File storageDir=cluster.getInstanceStorageDir(0,0);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,cluster.getNamesystem().getBlockPoolId());
assertTrue("data directory does not exist",data_dir.exists());
List metaFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
assertTrue("Data directory does not contain any blocks or there was an " + "IO error",metaFiles != null && !metaFiles.isEmpty());
File metaFile=metaFiles.get(0);
// Overwrite the last 2 bytes of a block's metadata file with random data
// so the checksum no longer matches.
// NOTE(review): if channel.write throws, 'file' is never closed — consider
// try/finally here.
RandomAccessFile file=new RandomAccessFile(metaFile,"rw");
FileChannel channel=file.getChannel();
long position=channel.size() - 2;
int length=2;
byte[] buffer=new byte[length];
random.nextBytes(buffer);
channel.write(ByteBuffer.wrap(buffer),position);
file.close();
LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset "+ position+ " length "+ length);
// Reading the files is expected to surface the corruption as a
// BlockMissingException; any other IOException is a test failure.
try {
util.checkFiles(fs,"/srcdat10");
}
catch ( BlockMissingException e) {
System.out.println("Received BlockMissingException as expected.");
}
catch ( IOException e) {
assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException "+ e,false);
}
badFiles=cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/",null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",badFiles.size() == 1);
// Second phase: restart the NN, which (due to the 1.5 threshold) remains
// in safemode, and verify the corrupt file is still reported.
cluster.restartNameNode(0);
fs=cluster.getFileSystem();
// listCorruptFileBlocks needs the replication queues; wait until they are
// being populated after restart.
while (!cluster.getNameNode().namesystem.isPopulatingReplQueues()) {
try {
LOG.info("waiting for replication queues");
Thread.sleep(1000);
}
catch ( InterruptedException ignore) {
}
}
try {
util.checkFiles(fs,"/srcdat10");
}
catch ( BlockMissingException e) {
System.out.println("Received BlockMissingException as expected.");
}
catch ( IOException e) {
assertTrue("Corrupted replicas not handled properly. " + "Expecting BlockMissingException " + " but received IOException "+ e,false);
}
badFiles=cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/",null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",badFiles.size() == 1);
// Confirm we really were in safemode while listing, then leave and clean up.
assertTrue("Namenode is not in safe mode",cluster.getNameNode().isInSafeMode());
cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false);
util.cleanup(fs,"/srcdat10");
}
catch ( Exception e) {
LOG.error(StringUtils.stringifyException(e));
throw e;
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 *
 * Creates 3x the server-side result limit worth of single-replica files,
 * deletes all their block/meta files on disk, and checks that (a) a single
 * namesystem call caps at the limit and (b) the client-side iterator pages
 * past the limit using more than one RPC.
 */
@Test(timeout=300000) public void testMaxCorruptFiles() throws Exception {
MiniDFSCluster cluster=null;
try {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so missing blocks are noticed quickly.
conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,3 * 1000);
cluster=new MiniDFSCluster.Builder(conf).build();
FileSystem fs=cluster.getFileSystem();
// Server-side cap on results per listCorruptFileBlocks call.
final int maxCorruptFileBlocks=FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;
// 3x the cap, replication 1 so deleting one replica corrupts the file.
DFSTestUtil util=new DFSTestUtil.Builder().setName("testMaxCorruptFiles").setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).build();
util.createFiles(fs,"/srcdat2",(short)1);
util.waitReplication(fs,"/srcdat2",(short)1);
final NameNode namenode=cluster.getNameNode();
// Baseline: nothing corrupt yet.
Collection badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",badFiles.size() == 0);
final String bpid=cluster.getNamesystem().getBlockPoolId();
// Delete every block and metadata file from all datanode storage dirs
// (4 datanode slots x 2 storage dirs each).
for (int i=0; i < 4; i++) {
for (int j=0; j <= 1; j++) {
File storageDir=cluster.getInstanceStorageDir(i,j);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
LOG.info("Removing files from " + data_dir);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
assertTrue("Cannot remove file.",blockFile.delete());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
}
// Restarting forces a fresh scan that reports the missing blocks.
LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
cluster.restartDataNodes();
cluster.waitActive();
// Poll until the namenode has registered at least the cap's worth of
// corrupt files (bounded overall by the @Test timeout).
badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
while (badFiles.size() < maxCorruptFileBlocks) {
LOG.info("# of corrupt files is: " + badFiles.size());
Thread.sleep(10000);
badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
}
// A single namesystem call must cap at exactly maxCorruptFileBlocks.
badFiles=namenode.getNamesystem().listCorruptFileBlocks("/srcdat2",null);
LOG.info("Namenode has bad files. " + badFiles.size());
assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting "+ maxCorruptFileBlocks+ ".",badFiles.size() == maxCorruptFileBlocks);
// The client-side iterator must page past the cap via multiple RPCs.
CorruptFileBlockIterator iter=(CorruptFileBlockIterator)fs.listCorruptFileBlocks(new Path("/srcdat2"));
int corruptPaths=countPaths(iter);
assertTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got "+ corruptPaths,corruptPaths > maxCorruptFileBlocks);
assertTrue("Iterator should have made more than 1 call but made " + iter.getCallsMade(),iter.getCallsMade() > 1);
util.cleanup(fs,"/srcdat2");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies FSNamesystem#listCorruptFileBlocks end to end: detects 3 files
 * whose blocks were deleted on disk, and exercises the cookie-based
 * pagination contract (a cookie of "1" skips the first result; a second
 * call with the advanced cookie returns nothing). Also confirms a healthy
 * subtree reports zero corrupt files.
 */
@Test(timeout=300000) public void testlistCorruptFileBlocks() throws Exception {
Configuration conf=new Configuration();
// Fast block reports and directory scans so deletions surface quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
FileSystem fs=null;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DFSTestUtil util=new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs,"/corruptData");
final NameNode namenode=cluster.getNameNode();
// Baseline: no corruption before tampering.
Collection corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",null);
int numCorrupt=corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
String bpid=cluster.getNamesystem().getBlockPoolId();
// Delete block + metadata files from every datanode storage directory.
for (int i=0; i < 4; i++) {
for (int j=0; j <= 1; j++) {
File storageDir=cluster.getInstanceStorageDir(i,j);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
LOG.info("Deliberately removing file " + blockFile.getName());
assertTrue("Cannot remove file.",blockFile.delete());
LOG.info("Deliberately removing file " + metadataFile.getName());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
}
// Poll (at most ~30s) until all 3 files are reported corrupt.
int count=0;
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",null);
numCorrupt=corruptFileBlocks.size();
while (numCorrupt < 3) {
Thread.sleep(1000);
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",null);
numCorrupt=corruptFileBlocks.size();
count++;
if (count > 30) break;
}
LOG.info("Namenode has bad files. " + numCorrupt);
assertTrue(numCorrupt == 3);
FSNamesystem.CorruptFileBlockInfo[] cfb=corruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
// Pagination: cookie "1" means "skip the first entry", so the next call
// returns 2 entries starting at the original second entry.
String[] cookie=new String[]{"1"};
Collection nextCorruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",cookie);
FSNamesystem.CorruptFileBlockInfo[] ncfb=nextCorruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
numCorrupt=nextCorruptFileBlocks.size();
assertTrue(numCorrupt == 2);
assertTrue(ncfb[0].block.getBlockName().equalsIgnoreCase(cfb[1].block.getBlockName()));
// The call above advanced the cookie past the end, so this returns none.
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/corruptData",cookie);
numCorrupt=corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
// A freshly created, untouched subtree must report zero corrupt files.
util.createFiles(fs,"/goodData");
corruptFileBlocks=namenode.getNamesystem().listCorruptFileBlocks("/goodData",null);
numCorrupt=corruptFileBlocks.size();
assertTrue(numCorrupt == 0);
util.cleanup(fs,"/corruptData");
util.cleanup(fs,"/goodData");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * check if nn.getCorruptFiles() returns a file that has corrupted blocks
 *
 * Writes random bytes over the tail of one block's on-disk metadata file
 * (breaking its checksum), confirms reads fail with BlockMissingException,
 * and verifies the namenode then lists exactly one corrupt file.
 *
 * Fixes: the RandomAccessFile used to corrupt the metadata file is now
 * closed in a finally block (it leaked if channel.write threw), and the
 * extraction-stripped List&lt;File&gt; generic is restored so
 * metaFiles.get(0) type-checks.
 */
@Test(timeout=300000) public void testListCorruptFilesCorruptedBlock() throws Exception {
  MiniDFSCluster cluster=null;
  Random random=new Random();
  try {
    Configuration conf=new HdfsConfiguration();
    // Aggressive scan/report intervals so the corruption is detected fast.
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,3 * 1000);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
    cluster=new MiniDFSCluster.Builder(conf).build();
    FileSystem fs=cluster.getFileSystem();
    DFSTestUtil util=new DFSTestUtil.Builder().setName("testCorruptFilesCorruptedBlock").setNumFiles(2).setMaxLevels(1).setMaxSize(512).build();
    util.createFiles(fs,"/srcdat10");
    final NameNode namenode=cluster.getNameNode();
    // Baseline: no corrupt files before tampering.
    Collection badFiles=namenode.getNamesystem().listCorruptFileBlocks("/",null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting None.",badFiles.size() == 0);
    String bpid=cluster.getNamesystem().getBlockPoolId();
    File storageDir=cluster.getInstanceStorageDir(0,1);
    File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
    assertTrue("data directory does not exist",data_dir.exists());
    List<File> metaFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
    assertTrue("Data directory does not contain any blocks or there was an " + "IO error",metaFiles != null && !metaFiles.isEmpty());
    File metaFile=metaFiles.get(0);
    // Overwrite the final 2 bytes of the metadata file with random data so
    // its checksum no longer matches the block. Close the file even if the
    // write fails.
    RandomAccessFile file=new RandomAccessFile(metaFile,"rw");
    try {
      FileChannel channel=file.getChannel();
      long position=channel.size() - 2;
      int length=2;
      byte[] buffer=new byte[length];
      random.nextBytes(buffer);
      channel.write(ByteBuffer.wrap(buffer),position);
      LOG.info("Deliberately corrupting file " + metaFile.getName() + " at offset "+ position+ " length "+ length);
    } finally {
      file.close();
    }
    // Reading should surface the corruption as BlockMissingException; any
    // other IOException fails the test.
    try {
      util.checkFiles(fs,"/srcdat10");
    }
    catch ( BlockMissingException e) {
      System.out.println("Received BlockMissingException as expected.");
    }
    catch ( IOException e) {
      assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " + " but received IOException " + e,false);
    }
    badFiles=namenode.getNamesystem().listCorruptFileBlocks("/",null);
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting 1.",badFiles.size() == 1);
    util.cleanup(fs,"/srcdat10");
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 *
 * Same scenario as testlistCorruptFileBlocks, but driven through the
 * client-side DistributedFileSystem#listCorruptFileBlocks RemoteIterator
 * instead of the namesystem API.
 */
@Test(timeout=300000) public void testlistCorruptFileBlocksDFS() throws Exception {
Configuration conf=new Configuration();
// Fast reports/scans so the deleted blocks are noticed quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
FileSystem fs=null;
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs=cluster.getFileSystem();
DistributedFileSystem dfs=(DistributedFileSystem)fs;
DFSTestUtil util=new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
util.createFiles(fs,"/corruptData");
// Baseline: the iterator yields no corrupt paths before tampering.
RemoteIterator corruptFileBlocks=dfs.listCorruptFileBlocks(new Path("/corruptData"));
int numCorrupt=countPaths(corruptFileBlocks);
assertTrue(numCorrupt == 0);
String bpid=cluster.getNamesystem().getBlockPoolId();
// Delete block + metadata files from both storage dirs of datanode 0.
for (int i=0; i < 2; i++) {
File storageDir=cluster.getInstanceStorageDir(0,i);
File data_dir=MiniDFSCluster.getFinalizedDir(storageDir,bpid);
List metadataFiles=MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
if (metadataFiles == null) continue;
for ( File metadataFile : metadataFiles) {
File blockFile=Block.metaToBlockFile(metadataFile);
LOG.info("Deliberately removing file " + blockFile.getName());
assertTrue("Cannot remove file.",blockFile.delete());
LOG.info("Deliberately removing file " + metadataFile.getName());
assertTrue("Cannot remove file.",metadataFile.delete());
}
}
// Poll (at most ~30s) until all 3 files are reported corrupt.
int count=0;
corruptFileBlocks=dfs.listCorruptFileBlocks(new Path("/corruptData"));
numCorrupt=countPaths(corruptFileBlocks);
while (numCorrupt < 3) {
Thread.sleep(1000);
corruptFileBlocks=dfs.listCorruptFileBlocks(new Path("/corruptData"));
numCorrupt=countPaths(corruptFileBlocks);
count++;
if (count > 30) break;
}
LOG.info("Namenode has bad files. " + numCorrupt);
assertTrue(numCorrupt == 3);
util.cleanup(fs,"/corruptData");
// NOTE(review): "/goodData" is never created in this test (it is in the
// sibling testlistCorruptFileBlocks); this cleanup is a harmless no-op
// delete of a nonexistent path — presumably a copy-over.
util.cleanup(fs,"/goodData");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises NameCache's promotion and reset behaviour:
 * names put more than once are interned (the cache hands back the very same
 * String instance), names put only once are not, and reset() discards all
 * cached entries so nothing is reused afterwards.
 */
@Test public void testDictionary() throws Exception {
  final NameCache cache = new NameCache(2);
  final String[] repeated = {"part1", "part10000000", "fileabc", "abc", "filepart"};
  final String[] singles = {"spart1", "apart", "abcd", "def"};
  // Put each repeated name twice; the second put must return the identical
  // instance (reference equality, deliberately '==').
  for (int i = 0; i < repeated.length; i++) {
    final String name = repeated[i];
    cache.put(name);
    assertTrue(name == cache.put(name));
  }
  // Names seen only once should not be promoted into the cache.
  for (int i = 0; i < singles.length; i++) {
    cache.put(singles[i]);
  }
  cache.initialized();
  // After initialization: repeated names are reused, singles are not.
  for (int i = 0; i < repeated.length; i++) {
    verifyNameReuse(cache, repeated[i], true);
  }
  assertEquals(repeated.length, cache.size());
  for (int i = 0; i < singles.length; i++) {
    verifyNameReuse(cache, singles[i], false);
  }
  // A reset cache reuses nothing, regardless of prior history.
  cache.reset();
  cache.initialized();
  for (int i = 0; i < repeated.length; i++) {
    verifyNameReuse(cache, repeated[i], false);
  }
  for (int i = 0; i < singles.length; i++) {
    verifyNameReuse(cache, singles[i], false);
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
 * This test tries to simulate failure scenarios.
 * 1. Start cluster with shared name and edits dir
 * 2. Restart cluster by adding separate name and edits dirs
 * 3. Restart cluster by removing shared name and edits dir
 * 4. Restart cluster with old shared name and edits dir, but only latest
 * name dir. This should fail since we don't have latest edits dir
 * 5. Restart cluster with old shared name and edits dir, but only latest
 * edits dir. This should succeed since the latest edits will have
 * segments leading all the way from the image in name_and_edits.
 */
@Test public void testNameEditsConfigsFailure() throws IOException {
// One file is created per phase, so later phases can verify which state
// (image + edits) the NN actually loaded.
Path file1=new Path("TestNameEditsConfigs1");
Path file2=new Path("TestNameEditsConfigs2");
Path file3=new Path("TestNameEditsConfigs3");
MiniDFSCluster cluster=null;
Configuration conf=null;
FileSystem fileSys=null;
File nameOnlyDir=new File(base_dir,"name");
File editsOnlyDir=new File(base_dir,"edits");
File nameAndEditsDir=new File(base_dir,"name_and_edits");
// Phase 1: single shared directory holds both image and edits.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
cluster.waitActive();
assertTrue(new File(nameAndEditsDir,"current/VERSION").exists());
fileSys=cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
DFSTestUtil.createFile(fileSys,file1,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file1,replication);
}
finally {
// NOTE(review): if the cluster fails to build, fileSys/cluster are null
// here and these calls would NPE, masking the original failure.
fileSys.close();
cluster.shutdown();
}
// Phase 2: restart (no format) adding a separate name dir and edits dir
// alongside the shared one; all three must be populated.
conf=new HdfsConfiguration();
assertTrue(nameOnlyDir.mkdir());
assertTrue(editsOnlyDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath() + "," + nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath() + "," + editsOnlyDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
assertTrue(new File(nameAndEditsDir,"current/VERSION").exists());
assertTrue(new File(nameOnlyDir,"current/VERSION").exists());
assertTrue(new File(editsOnlyDir,"current/VERSION").exists());
fileSys=cluster.getFileSystem();
// file1 survived the restart; swap it for file2 for the next phase.
assertTrue(fileSys.exists(file1));
checkFile(fileSys,file1,replication);
cleanupFile(fileSys,file1);
DFSTestUtil.createFile(fileSys,file2,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file2,replication);
}
finally {
fileSys.close();
cluster.shutdown();
}
// Phase 3: drop the shared dir entirely; the separate name + edits dirs
// must be sufficient on their own.
try {
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsOnlyDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
// file1 was deleted in phase 2; file2 must still exist.
assertFalse(fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys,file2,replication);
cleanupFile(fileSys,file2);
DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file3,replication);
}
finally {
fileSys.close();
cluster.shutdown();
}
// Phase 4: stale shared dir + latest name dir, but NOT the latest edits
// dir — startup must fail because recent edits are unavailable.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
fail("Successfully started cluster but should not have been able to.");
}
catch ( IOException e) {
LOG.info("EXPECTED: cluster start failed due to missing " + "latest edits dir",e);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
cluster=null;
}
// Phase 5: stale shared dir + latest edits dir — must succeed, since the
// edits segments lead all the way from the old image to current state.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsOnlyDir.getPath() + "," + nameAndEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
fileSys=cluster.getFileSystem();
// Replayed edits reflect all prior phases: only file3 remains.
assertFalse(fileSys.exists(file1));
assertFalse(fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys,file3,replication);
cleanupFile(fileSys,file3);
DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file3,replication);
}
finally {
fileSys.close();
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
 * The test creates files and restarts cluster with different configs.
 * 1. Starts cluster with shared name and edits dirs
 * 2. Restarts cluster by adding additional (different) name and edits dirs
 * 3. Restarts cluster by removing shared name and edits dirs by allowing to
 * start using separate name and edits dirs
 * 4. Restart cluster by adding shared directory again, but make sure we
 * do not read any stale image or edits.
 * All along the test, we create and delete files at reach restart to make
 * sure we are reading proper edits and image.
 * @throws Exception
 */
@Test public void testNameEditsConfigs() throws Exception {
// One file per phase identifies which state the NN loaded after restart.
Path file1=new Path("TestNameEditsConfigs1");
Path file2=new Path("TestNameEditsConfigs2");
Path file3=new Path("TestNameEditsConfigs3");
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
Configuration conf=null;
FileSystem fileSys=null;
final File newNameDir=new File(base_dir,"name");
final File newEditsDir=new File(base_dir,"edits");
final File nameAndEdits=new File(base_dir,"name_and_edits");
final File checkpointNameDir=new File(base_dir,"secondname");
final File checkpointEditsDir=new File(base_dir,"secondedits");
final File checkpointNameAndEdits=new File(base_dir,"second_name_and_edits");
// All storage "current" dirs that must end up byte-identical after phase 2.
ImmutableList allCurrentDirs=ImmutableList.of(new File(nameAndEdits,"current"),new File(newNameDir,"current"),new File(newEditsDir,"current"),new File(checkpointNameAndEdits,"current"),new File(checkpointNameDir,"current"),new File(checkpointEditsDir,"current"));
// Subset that must additionally share the same newest fsimage.
ImmutableList imageCurrentDirs=ImmutableList.of(new File(nameAndEdits,"current"),new File(newNameDir,"current"),new File(checkpointNameAndEdits,"current"),new File(checkpointNameDir,"current"));
// Phase 1: shared name+edits dir, with a matching shared checkpoint dir.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointNameAndEdits.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
DFSTestUtil.createFile(fileSys,file1,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file1,replication);
// Checkpoint so the secondary's dirs also hold current state.
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// Phase 2: restart (no format) with additional name/edits and checkpoint
// dirs; the new dirs must be brought up to date.
conf=new HdfsConfiguration();
assertTrue(newNameDir.mkdir());
assertTrue(newEditsDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEdits.getPath() + "," + newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEdits.getPath() + "," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
// file1 survived; replace it with file2 for the next phase.
assertTrue(fileSys.exists(file1));
checkFile(fileSys,file1,replication);
cleanupFile(fileSys,file1);
DFSTestUtil.createFile(fileSys,file2,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file2,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// All storage dirs must now be in sync (modulo VERSION), with one newest
// image shared by the image-bearing dirs.
FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs,ImmutableSet.of("VERSION"));
FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
// Phase 3: drop the shared dirs; run purely on the separate name/edits
// dirs and their checkpoint counterparts.
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointEditsDir.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys,file2,replication);
cleanupFile(fileSys,file2);
DFSTestUtil.createFile(fileSys,file3,FILE_SIZE,FILE_SIZE,BLOCK_SIZE,replication,SEED);
checkFile(fileSys,file3,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// Image-only dirs hold images but no edits; edits-only dirs the reverse.
checkImageAndEditsFilesExistence(newNameDir,true,false);
checkImageAndEditsFilesExistence(newEditsDir,false,true);
checkImageAndEditsFilesExistence(checkpointNameDir,true,false);
checkImageAndEditsFilesExistence(checkpointEditsDir,false,true);
// Phase 4: wipe the shared dirs' contents, then re-add them; the NN must
// repopulate them from current state and not read anything stale.
assertTrue(FileUtil.fullyDelete(new File(nameAndEdits,"current")));
assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits,"current")));
conf=new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameAndEdits.getPath() + "," + newNameDir.getPath());
// NOTE(review): uses File#toString via concatenation rather than getPath();
// for java.io.File these produce the same string.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameAndEdits + "," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
replication=(short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,3);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
cluster.waitActive();
secondary=startSecondaryNameNode(conf);
fileSys=cluster.getFileSystem();
try {
// Only file3 should exist — proof no stale image/edits were read.
assertTrue(!fileSys.exists(file1));
assertTrue(!fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys,file3,replication);
secondary.doCheckpoint();
}
finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// Re-added shared dirs must contain both image and edits again.
checkImageAndEditsFilesExistence(nameAndEdits,true,true);
checkImageAndEditsFilesExistence(checkpointNameAndEdits,true,true);
}
InternalCallVerifier BooleanVerifier
/**
 * Starts a NameNodeHttpServer under the parameterized HTTP policy and checks
 * that each endpoint (HTTP / HTTPS) is reachable exactly when the policy
 * enables it, and absent (null address) when it does not.
 */
@Test public void testHttpPolicy() throws Exception {
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  // Port 0: let the server pick any free port.
  final InetSocketAddress bindAddr = InetSocketAddress.createUnresolved("localhost", 0);
  NameNodeHttpServer server = null;
  try {
    server = new NameNodeHttpServer(conf, null, bindAddr);
    server.start();
    final boolean httpOn = policy.isHttpEnabled();
    final boolean httpsOn = policy.isHttpsEnabled();
    // Enabled => endpoint answers; disabled => no address is bound.
    Assert.assertTrue(implies(httpOn, canAccess("http", server.getHttpAddress())));
    Assert.assertTrue(implies(!httpOn, server.getHttpAddress() == null));
    Assert.assertTrue(implies(httpsOn, canAccess("https", server.getHttpsAddress())));
    Assert.assertTrue(implies(!httpsOn, server.getHttpsAddress() == null));
  } finally {
    if (server != null) {
      server.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that every attribute published by the NameNodeInfo MXBean matches
 * the value reported directly by FSNamesystem, including live/dead node JSON,
 * journal status, cache stats, and the name-dir status map before and after a
 * name directory is made read-only (chmod 000) to simulate a failed dir.
 */
@SuppressWarnings({"unchecked"}) @Test public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf=new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
// Fast heartbeats so the stopped datanode is marked dead quickly.
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1);
MiniDFSCluster cluster=null;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FSNamesystem fsn=cluster.getNameNode().namesystem;
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
// Build an include file listing all datanodes, so a stopped datanode
// shows up as dead rather than silently excluded.
FileSystem localFileSys=FileSystem.getLocal(conf);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
Path includeFile=new Path(dir,"include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts=new StringBuilder();
for ( DataNode dn : cluster.getDataNodes()) {
includeHosts.append(dn.getDisplayName()).append("\n");
}
DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath());
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
// Kill one datanode and wait until the NN sees only 2 in service, so
// the DeadNodes attribute below is non-empty.
cluster.stopDataNode(0);
while (fsn.getNumDatanodesInService() != 2) {
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
// Compare each MXBean attribute against FSNamesystem's own getter.
String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId");
assertEquals(fsn.getClusterId(),clusterId);
String blockpoolId=(String)mbs.getAttribute(mxbeanName,"BlockPoolId");
assertEquals(fsn.getBlockPoolId(),blockpoolId);
String version=(String)mbs.getAttribute(mxbeanName,"Version");
assertEquals(fsn.getVersion(),version);
assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision()));
Long used=(Long)mbs.getAttribute(mxbeanName,"Used");
assertEquals(fsn.getUsed(),used.longValue());
Long total=(Long)mbs.getAttribute(mxbeanName,"Total");
assertEquals(fsn.getTotal(),total.longValue());
String safemode=(String)mbs.getAttribute(mxbeanName,"Safemode");
assertEquals(fsn.getSafemode(),safemode);
Long nondfs=(Long)(mbs.getAttribute(mxbeanName,"NonDfsUsedSpace"));
assertEquals(fsn.getNonDfsUsedSpace(),nondfs.longValue());
Float percentremaining=(Float)(mbs.getAttribute(mxbeanName,"PercentRemaining"));
assertEquals(fsn.getPercentRemaining(),percentremaining.floatValue(),DELTA);
Long totalblocks=(Long)(mbs.getAttribute(mxbeanName,"TotalBlocks"));
assertEquals(fsn.getTotalBlocks(),totalblocks.longValue());
// LiveNodes is JSON: every live node entry must carry the expected keys,
// and no blocks exist since nothing was written.
String alivenodeinfo=(String)(mbs.getAttribute(mxbeanName,"LiveNodes"));
Map> liveNodes=(Map>)JSON.parse(alivenodeinfo);
assertTrue(liveNodes.size() > 0);
for ( Map liveNode : liveNodes.values()) {
assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
assertTrue(liveNode.containsKey("capacity"));
assertTrue(((Long)liveNode.get("capacity")) > 0);
assertTrue(liveNode.containsKey("numBlocks"));
assertTrue(((Long)liveNode.get("numBlocks")) == 0);
}
assertEquals(fsn.getLiveNodes(),alivenodeinfo);
// DeadNodes must include the datanode stopped above.
String deadnodeinfo=(String)(mbs.getAttribute(mxbeanName,"DeadNodes"));
assertEquals(fsn.getDeadNodes(),deadnodeinfo);
Map> deadNodes=(Map>)JSON.parse(deadnodeinfo);
assertTrue(deadNodes.size() > 0);
for ( Map deadNode : deadNodes.values()) {
assertTrue(deadNode.containsKey("lastContact"));
assertTrue(deadNode.containsKey("decommissioned"));
assertTrue(deadNode.containsKey("xferaddr"));
}
String nodeUsage=(String)(mbs.getAttribute(mxbeanName,"NodeUsage"));
assertEquals("Bad value for NodeUsage",fsn.getNodeUsage(),nodeUsage);
String nameJournalStatus=(String)(mbs.getAttribute(mxbeanName,"NameJournalStatus"));
assertEquals("Bad value for NameJournalStatus",fsn.getNameJournalStatus(),nameJournalStatus);
String journalTxnInfo=(String)mbs.getAttribute(mxbeanName,"JournalTransactionInfo");
assertEquals("Bad value for NameTxnIds",fsn.getJournalTransactionInfo(),journalTxnInfo);
String nnStarted=(String)mbs.getAttribute(mxbeanName,"NNStarted");
assertEquals("Bad value for NNStarted",fsn.getNNStarted(),nnStarted);
String compileInfo=(String)mbs.getAttribute(mxbeanName,"CompileInfo");
assertEquals("Bad value for CompileInfo",fsn.getCompileInfo(),compileInfo);
String corruptFiles=(String)(mbs.getAttribute(mxbeanName,"CorruptFiles"));
assertEquals("Bad value for CorruptFiles",fsn.getCorruptFiles(),corruptFiles);
// NameDirStatuses: initially both name dirs are "active", none "failed".
String nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses"));
assertEquals(fsn.getNameDirStatuses(),nameDirStatuses);
Map> statusMap=(Map>)JSON.parse(nameDirStatuses);
Collection nameDirUris=cluster.getNameDirs(0);
for ( URI nameDirUri : nameDirUris) {
File nameDir=new File(nameDirUri);
System.out.println("Checking for the presence of " + nameDir + " in active name dirs.");
assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
}
assertEquals(2,statusMap.get("active").size());
assertEquals(0,statusMap.get("failed").size());
// Simulate a failed name dir: chmod 000, then roll the edit log so the
// NN actually tries (and fails) to write to it.
File failedNameDir=new File(nameDirUris.iterator().next());
assertEquals(0,FileUtil.chmod(new File(failedNameDir,"current").getAbsolutePath(),"000"));
cluster.getNameNodeRpc().rollEditLog();
nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses"));
statusMap=(Map>)JSON.parse(nameDirStatuses);
for ( URI nameDirUri : nameDirUris) {
File nameDir=new File(nameDirUri);
String expectedStatus=nameDir.equals(failedNameDir) ? "failed" : "active";
System.out.println("Checking for the presence of " + nameDir + " in "+ expectedStatus+ " name dirs.");
assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath()));
}
assertEquals(1,statusMap.get("active").size());
assertEquals(1,statusMap.get("failed").size());
// Cache stats: nothing cached, capacity = memlock limit x datanodes.
assertEquals(0L,mbs.getAttribute(mxbeanName,"CacheUsed"));
assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(),mbs.getAttribute(mxbeanName,"CacheCapacity"));
}
finally {
if (cluster != null) {
// Restore permissions on the chmod-000 dir so shutdown/cleanup can
// delete it.
for ( URI dir : cluster.getNameDirs(0)) {
FileUtil.chmod(new File(new File(dir),"current").getAbsolutePath(),"755");
}
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that hasAvailableDiskSpace returns true if disk usage is below
 * threshold.
 */
@Test public void testCheckAvailability() throws IOException {
  // A zero reservation means any volume with free space satisfies the check.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);
  NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
  // Message fixed: the method under test is hasAvailableDiskSpace, not
  // isResourceAvailable.
  assertTrue("hasAvailableDiskSpace must return true if "
      + "disk usage is lower than threshold", checker.hasAvailableDiskSpace());
}
InternalCallVerifier BooleanVerifier
/**
 * Test that the NN is considered to be out of resources only once all
 * redundant configured volumes are low on resources, or when any required
 * volume is low on resources.
 */
@Test public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
Configuration conf=new Configuration();
File nameDir1=new File(BASE_DIR,"name-dir1");
File nameDir2=new File(BASE_DIR,"name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
// Require at least 2 redundant volumes to have available space.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,2);
NameNodeResourceChecker nnrc=new NameNodeResourceChecker(conf);
// Replace the checker's real volumes with mocks: volumes 1-3 are
// redundant, volumes 4-5 are marked required.
Map volumes=new HashMap();
CheckedVolume volume1=Mockito.mock(CheckedVolume.class);
CheckedVolume volume2=Mockito.mock(CheckedVolume.class);
CheckedVolume volume3=Mockito.mock(CheckedVolume.class);
CheckedVolume volume4=Mockito.mock(CheckedVolume.class);
CheckedVolume volume5=Mockito.mock(CheckedVolume.class);
Mockito.when(volume1.isResourceAvailable()).thenReturn(true);
Mockito.when(volume2.isResourceAvailable()).thenReturn(true);
Mockito.when(volume3.isResourceAvailable()).thenReturn(true);
Mockito.when(volume4.isResourceAvailable()).thenReturn(true);
Mockito.when(volume5.isResourceAvailable()).thenReturn(true);
Mockito.when(volume4.isRequired()).thenReturn(true);
Mockito.when(volume5.isRequired()).thenReturn(true);
volumes.put("volume1",volume1);
volumes.put("volume2",volume2);
volumes.put("volume3",volume3);
volumes.put("volume4",volume4);
volumes.put("volume5",volume5);
nnrc.setVolumes(volumes);
// All volumes report available space, so resources are available.
assertTrue(nnrc.hasAvailableDiskSpace());
// One redundant volume low: two redundant volumes remain (>= minimum 2).
Mockito.when(volume1.isResourceAvailable()).thenReturn(false);
assertTrue(nnrc.hasAvailableDiskSpace());
// Two redundant volumes low: only one remains, below the minimum of 2.
Mockito.when(volume2.isResourceAvailable()).thenReturn(false);
assertFalse(nnrc.hasAvailableDiskSpace());
// Lowering the minimum to 1 makes the single remaining redundant volume
// sufficient again. (Note: "Reduntdant" is the actual API spelling.)
nnrc.setMinimumReduntdantVolumes(1);
assertTrue(nnrc.hasAvailableDiskSpace());
// All redundant volumes low: out of resources even with a minimum of 1.
Mockito.when(volume3.isResourceAvailable()).thenReturn(false);
assertFalse(nnrc.hasAvailableDiskSpace());
// A single required volume being low makes resources unavailable,
// regardless of the state of the redundant volumes.
Mockito.when(volume3.isResourceAvailable()).thenReturn(true);
Mockito.when(volume4.isResourceAvailable()).thenReturn(false);
assertFalse(nnrc.hasAvailableDiskSpace());
}
InternalCallVerifier EqualityVerifier
/**
 * Tests that only a single space check is performed if two name dirs are
 * supplied which are on the same volume.
 */
@Test public void testChecking2NameDirsOnOneVolume() throws IOException {
  Configuration config = new Configuration();
  File dirA = new File(BASE_DIR, "name-dir1");
  File dirB = new File(BASE_DIR, "name-dir2");
  dirA.mkdirs();
  dirB.mkdirs();
  // Two edits directories that live on the same underlying volume.
  String editsDirs = dirA.getAbsolutePath() + "," + dirB.getAbsolutePath();
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
  // An unsatisfiable reservation marks every checked volume as low on space.
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
  NameNodeResourceChecker checker = new NameNodeResourceChecker(config);
  // Both dirs share one volume, so exactly one low-space volume is reported.
  assertEquals("Should not check the same volume more than once.", 1,
      checker.getVolumesLowOnSpace().size());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that hasAvailableDiskSpace returns false if disk usage is above
 * threshold.
 */
@Test public void testCheckAvailabilityNeg() throws IOException {
  // Reserve more space than any disk can provide so the check must fail.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
  NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
  // Message fixed: the method under test is hasAvailableDiskSpace, not
  // isResourceAvailable.
  assertFalse("hasAvailableDiskSpace must return false if "
      + "disk usage is higher than threshold", checker.hasAvailableDiskSpace());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that NameNode resource monitor causes the NN to enter safe mode when
 * resources are low.
 */
@Test public void testCheckThatNameNodeResourceMonitorIsRunning() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
try {
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,nameDir.getAbsolutePath());
// Check resources every millisecond so the monitor reacts quickly.
conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
// Inject a mock checker so the test controls what the monitor sees.
NameNodeResourceChecker mockResourceChecker=Mockito.mock(NameNodeResourceChecker.class);
Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(true);
cluster.getNameNode().getNamesystem().nnResourceChecker=mockResourceChecker;
cluster.waitActive();
// Verify the NameNodeResourceMonitor thread is actually running by
// scanning all live threads for its class name.
String name=NameNodeResourceMonitor.class.getName();
boolean isNameNodeMonitorRunning=false;
Set runningThreads=Thread.getAllStackTraces().keySet();
for ( Thread runningThread : runningThreads) {
if (runningThread.toString().startsWith("Thread[" + name)) {
isNameNodeMonitorRunning=true;
break;
}
}
assertTrue("NN resource monitor should be running",isNameNodeMonitorRunning);
assertFalse("NN should not presently be in safe mode",cluster.getNameNode().isInSafeMode());
// Flip the mock to report low resources, then wait (up to 60 seconds)
// for the monitor to push the NN into safe mode.
Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);
long startMillis=Time.now();
while (!cluster.getNameNode().isInSafeMode() && Time.now() < startMillis + (60 * 1000)) {
Thread.sleep(1000);
}
assertTrue("NN should be in safe mode after resources crossed threshold",cluster.getNameNode().isInSafeMode());
}
finally {
if (cluster != null) cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Tests that only a single space check is performed if extra volumes are
 * configured manually which also coincide with a volume the name dir is on.
 */
@Test public void testCheckingExtraVolumes() throws IOException {
  Configuration config = new Configuration();
  File editsDir = new File(BASE_DIR, "name-dir");
  editsDir.mkdirs();
  String path = editsDir.getAbsolutePath();
  // The same directory serves as both the edits dir and an explicitly
  // configured extra checked volume; the checker should de-duplicate them.
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, path);
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, path);
  // An unsatisfiable reservation marks every checked volume as low on space.
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
  NameNodeResourceChecker checker = new NameNodeResourceChecker(config);
  assertEquals("Should not check the same volume more than once.", 1,
      checker.getVolumesLowOnSpace().size());
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Tests that DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY controls the bind address
 * of the service RPC server: unset, the server binds the configured
 * (localhost) address; set to the wildcard, it must bind the wildcard.
 */
@Test(timeout=300000) public void testServiceRpcBindHostKey() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=getServiceRpcServerAddress(cluster);
assertThat("Bind address not expected to be wildcard by default.",address,not("/" + WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
// Clear the reference so the second phase's finally block does not
// attempt to shut the same cluster down again.
cluster=null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
// With the bind-host key set, the host part of the listener address must
// be overridden with the wildcard.
conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,WILDCARD_ADDRESS);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=getServiceRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",address,is("/" + WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that DFS_NAMENODE_HTTP_BIND_HOST_KEY controls the HTTP server's bind
 * address: unset, the server binds the configured (localhost) address; set
 * to the wildcard, the address must start with the wildcard.
 */
@Test(timeout=300000) public void testHttpBindHostKey() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
LOG.info("Testing without " + DFS_NAMENODE_HTTP_BIND_HOST_KEY);
try {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=cluster.getNameNode().getHttpAddress().toString();
assertFalse("HTTP Bind address not expected to be wildcard by default.",address.startsWith(WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
// Clear the reference so the second phase's finally block does not
// attempt to shut the same cluster down again.
cluster=null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_HTTP_BIND_HOST_KEY);
// The bind-host key overrides the host part of the HTTP address.
conf.set(DFS_NAMENODE_HTTP_BIND_HOST_KEY,WILDCARD_ADDRESS);
try {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,LOCALHOST_SERVER_ADDRESS);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=cluster.getNameNode().getHttpAddress().toString();
assertTrue("HTTP Bind address " + address + " is not wildcard.",address.startsWith(WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * HTTPS test is different since we need to setup SSL configuration.
 * NN also binds the wildcard address for HTTPS port by default so we must
 * pick a different host/port combination.
 * @throws Exception
 */
@Test(timeout=300000) public void testHttpsBindHostKey() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  LOG.info("Testing behavior without " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
  setupSsl();
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  // Phase 1: bind-host key unset -- the HTTPS server should bind the
  // configured (localhost) address, not the wildcard.
  try {
    conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    String address = cluster.getNameNode().getHttpsAddress().toString();
    // Messages fixed: this test exercises the HTTPS address, not HTTP.
    assertFalse("HTTPS Bind address not expected to be wildcard by default.",
        address.startsWith(WILDCARD_ADDRESS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // Clear the reference so phase 2's finally does not re-shutdown.
      cluster = null;
    }
  }
  LOG.info("Testing behavior with " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
  // Phase 2: bind-host key set to the wildcard -- the server must bind it.
  conf.set(DFS_NAMENODE_HTTPS_BIND_HOST_KEY, WILDCARD_ADDRESS);
  try {
    conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    String address = cluster.getNameNode().getHttpsAddress().toString();
    assertTrue("HTTPS Bind address " + address + " is not wildcard.",
        address.startsWith(WILDCARD_ADDRESS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Tests that DFS_NAMENODE_RPC_BIND_HOST_KEY controls the client RPC server's
 * bind address: unset, the server does not bind the wildcard; set to the
 * wildcard, the listener address must be the wildcard.
 */
@Test(timeout=300000) public void testRpcBindHostKey() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=getRpcServerAddress(cluster);
assertThat("Bind address not expected to be wildcard by default.",address,not("/" + WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
// Clear the reference so the second phase's finally block does not
// attempt to shut the same cluster down again.
cluster=null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
// With the bind-host key set, the listener must bind the wildcard address.
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY,WILDCARD_ADDRESS);
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address=getRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",address,is("/" + WILDCARD_ADDRESS));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that setting the RPC bind-host key to 0.0.0.0 makes the client
 * RPC server listen on the any-address.
 */
@Test public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    // Unpack the original chained lookup so each step is visible.
    NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
    String boundHost =
        rpcServer.getClientRpcServer().getListenerAddress().getHostName();
    assertEquals("0.0.0.0", boundHost);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Remove the key again (defensive; conf is local to this test).
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * The following test first creates a file.
 * It verifies the block information from a datanode.
 * Then, it updates the block with new information and verifies again.
 */
@Test public void testVolumeSize() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
// Per-volume space reservation; checked against capacity totals below.
long reserved=10000;
conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,reserved);
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final FSNamesystem namesystem=cluster.getNamesystem();
final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
final List live=new ArrayList();
final List dead=new ArrayList();
dm.fetchDatanodes(live,dead,false);
// Default builder starts a single datanode.
assertTrue(live.size() == 1);
long used, remaining, configCapacity, nonDFSUsed, bpUsed;
float percentUsed, percentRemaining, percentBpUsed;
// Per-datanode invariants: capacity is fully accounted for by
// used + remaining + non-DFS-used, and the percentage figures must
// agree with DFSUtil's computations.
for ( final DatanodeDescriptor datanode : live) {
used=datanode.getDfsUsed();
remaining=datanode.getRemaining();
nonDFSUsed=datanode.getNonDfsUsed();
configCapacity=datanode.getCapacity();
percentUsed=datanode.getDfsUsedPercent();
percentRemaining=datanode.getRemainingPercent();
bpUsed=datanode.getBlockPoolUsed();
percentBpUsed=datanode.getBlockPoolUsedPercent();
LOG.info("Datanode configCapacity " + configCapacity + " used "+ used+ " non DFS used "+ nonDFSUsed+ " remaining "+ remaining+ " perentUsed "+ percentUsed+ " percentRemaining "+ percentRemaining);
assertTrue(configCapacity == (used + remaining + nonDFSUsed));
assertTrue(percentUsed == DFSUtil.getPercentUsed(used,configCapacity));
assertTrue(percentRemaining == DFSUtil.getPercentRemaining(remaining,configCapacity));
assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed,configCapacity));
}
// Cluster-wide invariants: the namesystem totals must equal the raw disk
// capacity minus the per-volume reservation.
DF df=new DF(new File(cluster.getDataDirectory()),conf);
// assumes the datanode uses 2 data dirs (MiniDFSCluster default) -- TODO confirm
int numOfDataDirs=2;
long diskCapacity=numOfDataDirs * df.getCapacity();
// The reservation applies once per data dir.
reserved*=numOfDataDirs;
configCapacity=namesystem.getCapacityTotal();
used=namesystem.getCapacityUsed();
nonDFSUsed=namesystem.getNonDfsUsedSpace();
remaining=namesystem.getCapacityRemaining();
percentUsed=namesystem.getPercentUsed();
percentRemaining=namesystem.getPercentRemaining();
bpUsed=namesystem.getBlockPoolUsedSpace();
percentBpUsed=namesystem.getPercentBlockPoolUsed();
LOG.info("Data node directory " + cluster.getDataDirectory());
LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "+ configCapacity+ " reserved "+ reserved+ " used "+ used+ " remaining "+ remaining+ " nonDFSUsed "+ nonDFSUsed+ " remaining "+ remaining+ " percentUsed "+ percentUsed+ " percentRemaining "+ percentRemaining+ " bpUsed "+ bpUsed+ " percentBpUsed "+ percentBpUsed);
assertTrue(configCapacity == diskCapacity - reserved);
assertTrue(configCapacity == (used + remaining + nonDFSUsed));
assertTrue(percentUsed == DFSUtil.getPercentUsed(used,configCapacity));
assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed,configCapacity));
assertTrue(percentRemaining == ((float)remaining * 100.0f) / (float)configCapacity);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that the live-node count, in-service-node count, total load and
 * in-service xceiver average track datanode shutdowns, restarts,
 * decommissioning and open write pipelines.
 */
@Test public void testXceiverCount() throws Exception {
Configuration conf=new HdfsConfiguration();
// Fail fast instead of retrying when a pipeline cannot be extended.
conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,0);
MiniDFSCluster cluster=null;
final int nodes=8;
final int fileCount=5;
final short fileRepl=3;
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
cluster.waitActive();
final FSNamesystem namesystem=cluster.getNamesystem();
final DatanodeManager dnm=namesystem.getBlockManager().getDatanodeManager();
List datanodes=cluster.getDataNodes();
final DistributedFileSystem fs=cluster.getFileSystem();
triggerHeartbeats(datanodes);
// Baseline: each idle datanode contributes 1 to the load figures.
int expectedTotalLoad=nodes;
int expectedInServiceNodes=nodes;
int expectedInServiceLoad=nodes;
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceLoad,namesystem.getInServiceXceiverAverage(),EPSILON);
// Shut down half the nodes; each shutdown drops both the live count
// and the in-service count.
for (int i=0; i < nodes / 2; i++) {
DataNode dn=datanodes.get(i);
DatanodeDescriptor dnd=dnm.getDatanode(dn.getDatanodeId());
dn.shutdown();
// Zero the heartbeat timestamp so the next check marks the node dead.
dnd.setLastUpdate(0L);
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
expectedInServiceNodes--;
assertEquals(expectedInServiceNodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
}
// Restart all datanodes and re-verify the baseline metrics.
cluster.restartDataNodes();
cluster.waitActive();
datanodes=cluster.getDataNodes();
expectedInServiceNodes=nodes;
assertEquals(nodes,datanodes.size());
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceLoad,namesystem.getInServiceXceiverAverage(),EPSILON);
// Open write pipelines: each open file adds 2 xceivers per replica
// (visible in the += 2 * fileRepl bookkeeping below).
DFSOutputStream[] streams=new DFSOutputStream[fileCount];
for (int i=0; i < fileCount; i++) {
streams[i]=(DFSOutputStream)fs.create(new Path("/f" + i),fileRepl).getWrappedStream();
streams[i].write("1".getBytes());
streams[i].hsync();
expectedTotalLoad+=2 * fileRepl;
expectedInServiceLoad+=2 * fileRepl;
}
triggerHeartbeats(datanodes);
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceNodes,namesystem.getInServiceXceiverAverage(),EPSILON);
// Decommission fileRepl nodes: they remain live and keep contributing
// to total load, but drop out of the in-service figures.
for (int i=0; i < fileRepl; i++) {
expectedInServiceNodes--;
DatanodeDescriptor dnd=dnm.getDatanode(datanodes.get(i).getDatanodeId());
expectedInServiceLoad-=dnd.getXceiverCount();
dnm.startDecommission(dnd);
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
Thread.sleep(100);
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceNodes,namesystem.getInServiceXceiverAverage(),EPSILON);
}
// Close the streams: every pipeline member sheds 2 xceivers, but only
// nodes still in service reduce the in-service load.
for (int i=0; i < fileCount; i++) {
int decomm=0;
for ( DatanodeInfo dni : streams[i].getPipeline()) {
DatanodeDescriptor dnd=dnm.getDatanode(dni);
expectedTotalLoad-=2;
if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
decomm++;
}
else {
expectedInServiceLoad-=2;
}
}
try {
streams[i].close();
}
catch ( IOException ioe) {
// A close may legitimately fail when the entire pipeline is
// decommissioning; anything else is a real failure.
if (decomm < fileRepl) {
throw ioe;
}
}
triggerHeartbeats(datanodes);
assertEquals(nodes,namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
assertEquals(expectedTotalLoad,namesystem.getTotalLoad());
assertEquals((double)expectedInServiceLoad / expectedInServiceNodes,namesystem.getInServiceXceiverAverage(),EPSILON);
}
// Shut down all nodes one by one and watch the counters drain to zero.
for (int i=0; i < nodes; i++) {
DataNode dn=datanodes.get(i);
dn.shutdown();
DatanodeDescriptor dnDesc=dnm.getDatanode(dn.getDatanodeId());
dnDesc.setLastUpdate(0L);
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
assertEquals(nodes - 1 - i,namesystem.getNumLiveDataNodes());
// The first fileRepl nodes were decommissioned above and are already
// excluded from the in-service count.
if (i >= fileRepl) {
expectedInServiceNodes--;
}
assertEquals(expectedInServiceNodes,namesystem.getNumDatanodesInService());
double expectedXceiverAvg=(i == nodes - 1) ? 0.0 : 1.0;
assertEquals((double)expectedXceiverAvg,namesystem.getInServiceXceiverAverage(),EPSILON);
}
assertEquals(0,namesystem.getNumLiveDataNodes());
assertEquals(0,namesystem.getNumDatanodesInService());
assertEquals(0.0,namesystem.getTotalLoad(),EPSILON);
assertEquals(0.0,namesystem.getInServiceXceiverAverage(),EPSILON);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test for createSnapshot, renameSnapshot and deleteSnapshot retry-cache
 * behavior: retried calls within one call id replay the cached result;
 * a new call id executes for real.
 */
@Test public void testSnapshotMethods() throws Exception {
String dir="/testNamenodeRetryCache/testCreateSnapshot/src";
resetCall();
namesystem.mkdirs(dir,perm,true);
namesystem.allowSnapshot(dir);
// Within one call id, retried createSnapshot calls return the cached name.
newCall();
String name=namesystem.createSnapshot(dir,"snap1");
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
Assert.assertEquals(name,namesystem.createSnapshot(dir,"snap1"));
// A new call id executes for real and must fail: snap1 already exists.
newCall();
try {
namesystem.createSnapshot(dir,"snap1");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
// expected: snapshot already exists
}
// Retried renames within one call id replay the cached success.
newCall();
namesystem.renameSnapshot(dir,"snap1","snap2");
namesystem.renameSnapshot(dir,"snap1","snap2");
namesystem.renameSnapshot(dir,"snap1","snap2");
// A new call id executes for real and must fail: snap1 no longer exists.
newCall();
try {
namesystem.renameSnapshot(dir,"snap1","snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
// expected: source snapshot is gone
}
// Retried deletes within one call id replay the cached success.
newCall();
namesystem.deleteSnapshot(dir,"snap2");
namesystem.deleteSnapshot(dir,"snap2");
namesystem.deleteSnapshot(dir,"snap2");
// A new call id executes for real and must fail: snap2 is already deleted.
newCall();
try {
namesystem.deleteSnapshot(dir,"snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
}
catch ( IOException e) {
// expected: snapshot is gone
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After run a set of operations, restart NN and check if the retry cache has
 * been rebuilt based on the editlog.
 */
@Test public void testRetryCacheRebuild() throws Exception {
// Run the standard operation mix; 23 of its operations populate the
// retry cache.
DFSTestUtil.runOperations(cluster,filesystem,conf,BlockSize,0);
LightWeightCache cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Snapshot the cache entries so they can be compared after the restart.
Map oldEntries=new HashMap();
Iterator iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
oldEntries.put(entry,entry);
}
// Restart the NN; the retry cache must be rebuilt from the edit log.
cluster.restartNameNode();
cluster.waitActive();
namesystem=cluster.getNamesystem();
assertTrue(namesystem.hasRetryCache());
cacheSet=(LightWeightCache)namesystem.getRetryCache().getCacheSet();
assertEquals(23,cacheSet.size());
// Every pre-restart entry must still be present after the rebuild.
iter=cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry=iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test for create file
 */
@Test public void testCreate() throws Exception {
String src="/testNamenodeRetryCache/testCreate/file";
newCall();
// First startFile creates the file; retries under the same call id are
// served from the retry cache and return the identical status object.
HdfsFileStatus status=namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null);
Assert.assertEquals(status,namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null));
Assert.assertEquals(status,namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null));
// A new call id executes for real and must fail: the file already exists
// (CREATE without overwrite).
newCall();
try {
namesystem.startFile(src,perm,"holder","clientmachine",EnumSet.of(CreateFlag.CREATE),true,(short)1,BlockSize,null);
Assert.fail("testCreate - expected exception is not thrown");
}
catch ( IOException e) {
// expected: file already exists
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test for append: retried appendFile calls under one call id replay the
 * cached LocatedBlock; a new call id executes for real and fails because
 * the lease is already held.
 */
@Test public void testAppend() throws Exception {
String src="/testNamenodeRetryCache/testAppend/src";
resetCall();
DFSTestUtil.createFile(filesystem,new Path(src),128,(short)1,0L);
// Within one call id, retried appends return the cached block.
newCall();
LocatedBlock b=namesystem.appendFile(src,"holder","clientMachine");
Assert.assertEquals(b,namesystem.appendFile(src,"holder","clientMachine"));
Assert.assertEquals(b,namesystem.appendFile(src,"holder","clientMachine"));
// A new call id executes for real and must fail.
newCall();
try {
namesystem.appendFile(src,"holder","clientMachine");
Assert.fail("testAppend - expected exception is not thrown");
}
catch ( Exception e) {
// expected
}
}
InternalCallVerifier BooleanVerifier
/**
 * Tests for delete call
 */
@Test public void testDelete() throws Exception {
String dir="/testNamenodeRetryCache/testDelete";
newCall();
namesystem.mkdirs(dir,perm,true);
// Within one call id the first delete succeeds; the retried calls replay
// the cached true result even though the directory is already gone.
newCall();
Assert.assertTrue(namesystem.delete(dir,false));
Assert.assertTrue(namesystem.delete(dir,false));
Assert.assertTrue(namesystem.delete(dir,false));
// A new call id executes for real: the directory no longer exists.
newCall();
Assert.assertFalse(namesystem.delete(dir,false));
}
InternalCallVerifier BooleanVerifier
/**
 * Test for rename1
 */
@SuppressWarnings("deprecation") @Test public void testRename1() throws Exception {
String src="/testNamenodeRetryCache/testRename1/src";
String target="/testNamenodeRetryCache/testRename1/target";
resetCall();
namesystem.mkdirs(src,perm,true);
// Within one call id the first rename succeeds; the retried calls replay
// the cached true result even though src has already been moved.
newCall();
Assert.assertTrue(namesystem.renameTo(src,target));
Assert.assertTrue(namesystem.renameTo(src,target));
Assert.assertTrue(namesystem.renameTo(src,target));
// A new call id executes for real: src no longer exists, so rename fails.
newCall();
Assert.assertFalse(namesystem.renameTo(src,target));
}
InternalCallVerifier NullVerifier
/**
 * Verifies that the retry cache is created by default and suppressed when
 * explicitly disabled via configuration.
 */
@Test public void testRetryCacheConfig(){
  Configuration config = new HdfsConfiguration();
  // Enabled by default: initialization must yield a cache instance.
  Assert.assertNotNull(FSNamesystem.initRetryCache(config));
  // Explicitly disabling the cache must make initialization return null.
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false);
  Assert.assertNull(FSNamesystem.initRetryCache(config));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * check if DFS remains in proper condition after a restart
 */
@Test public void testRestartDFS() throws Exception {
final Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FSNamesystem fsn=null;
int numNamenodeDirs;
DFSTestUtil files=new DFSTestUtil.Builder().setName("TestRestartDFS").setNumFiles(200).build();
final String dir="/srcdat";
final Path rootpath=new Path("/");
final Path dirpath=new Path(dir);
long rootmtime;
FileStatus rootstatus;
FileStatus dirstatus;
// Phase 1: format a fresh cluster, create files, and modify owner/group
// metadata that must survive the restart.
try {
cluster=new MiniDFSCluster.Builder(conf).format(true).numDataNodes(NUM_DATANODES).build();
String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,new String[]{});
numNamenodeDirs=nameNodeDirs.length;
assertTrue("failed to get number of Namenode StorageDirs",numNamenodeDirs != 0);
FileSystem fs=cluster.getFileSystem();
files.createFiles(fs,dir);
rootmtime=fs.getFileStatus(rootpath).getModificationTime();
// NOTE(review): both statuses are taken from dirpath; rootstatus looks
// like it was meant to come from rootpath -- confirm. The later
// assertions still hold because setOwner below uses these same values.
rootstatus=fs.getFileStatus(dirpath);
dirstatus=fs.getFileStatus(dirpath);
fs.setOwner(rootpath,rootstatus.getOwner() + "_XXX",null);
fs.setOwner(dirpath,null,dirstatus.getGroup() + "_XXX");
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
// Phase 2: restart without formatting and verify files and metadata
// survived, then confirm a namespace change alters the fsimage.
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,1);
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(NUM_DATANODES).build();
fsn=cluster.getNamesystem();
FileSystem fs=cluster.getFileSystem();
assertTrue("Filesystem corrupted after restart.",files.checkFiles(fs,dir));
final FileStatus newrootstatus=fs.getFileStatus(rootpath);
assertEquals(rootmtime,newrootstatus.getModificationTime());
assertEquals(rootstatus.getOwner() + "_XXX",newrootstatus.getOwner());
assertEquals(rootstatus.getGroup(),newrootstatus.getGroup());
final FileStatus newdirstatus=fs.getFileStatus(dirpath);
assertEquals(dirstatus.getOwner(),newdirstatus.getOwner());
assertEquals(dirstatus.getGroup() + "_XXX",newdirstatus.getGroup());
rootmtime=fs.getFileStatus(rootpath).getModificationTime();
// Checksum the current fsimage, mutate the namespace, save it again,
// and verify the image contents actually changed.
final String checkAfterRestart=checkImages(fsn,numNamenodeDirs);
files.cleanup(fs,dir);
files.createFiles(fs,dir);
// saveNamespace requires the NN to be in safe mode.
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
final String checkAfterModify=checkImages(fsn,numNamenodeDirs);
assertFalse("Modified namespace should change fsimage contents. " + "was: " + checkAfterRestart + " now: "+ checkAfterModify,checkAfterRestart.equals(checkAfterModify));
fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
files.cleanup(fs,dir);
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. In this the above condition is
 * tested by reducing the replication factor
 * The test strategy :
 * Bring up Cluster with 3 DataNodes
 * Create a file of replication factor 3
 * Corrupt one replica of a block of the file
 * Verify that there are still 2 good replicas and 1 corrupt replica
 * (corrupt replica should not be removed since number of good
 * replicas (2) is less than replication factor (3))
 * Set the replication factor to 2
 * Verify that the corrupt replica is removed.
 * (corrupt replica should be removed since number of good
 * replicas (2) is equal to replication factor (2))
 */
@Test public void testWhenDecreasingReplication() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
corruptBlock(cluster,fs,fileName,0,block);
// With one replica corrupted, only 2 good replicas remain; the corrupt
// one stays because 2 < replication factor 3.
DFSTestUtil.waitReplication(fs,fileName,(short)2);
assertEquals(2,countReplicas(namesystem,block).liveReplicas());
assertEquals(1,countReplicas(namesystem,block).corruptReplicas());
// Dropping the replication factor to 2 makes the 2 good replicas
// sufficient, so the corrupt replica becomes eligible for removal.
namesystem.setReplication(fileName.toString(),(short)2);
// Give the block manager time to process the invalidation.
try {
Thread.sleep(3000);
}
catch ( InterruptedException ignored) {
}
assertEquals(2,countReplicas(namesystem,block).liveReplicas());
assertEquals(0,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * None of the blocks can be removed if all blocks are corrupt.
 * The test strategy :
 * Bring up Cluster with 3 DataNodes
 * Create a file of replication factor 3
 * Corrupt all three replicas
 * Verify that all replicas are corrupt and 3 replicas are present.
 * Set the replication factor to 1
 * Verify that all replicas are corrupt and 3 replicas are present.
 */
@Test public void testWithAllCorruptReplicas() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
// Corrupt every replica so no good copy remains.
corruptBlock(cluster,fs,fileName,0,block);
corruptBlock(cluster,fs,fileName,1,block);
corruptBlock(cluster,fs,fileName,2,block);
// Allow block reports to reach the NN.
try {
Thread.sleep(3000);
}
catch ( InterruptedException ignored) {
}
assertEquals(0,countReplicas(namesystem,block).liveReplicas());
assertEquals(3,countReplicas(namesystem,block).corruptReplicas());
// Even with replication factor 1, no corrupt replica may be removed
// because there is no good replica at all.
namesystem.setReplication(fileName.toString(),(short)1);
try {
Thread.sleep(3000);
}
catch ( InterruptedException ignored) {
}
assertEquals(0,countReplicas(namesystem,block).liveReplicas());
assertEquals(3,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. In this test, the above
 * condition is achieved by increasing the number of good replicas by
 * replicating on a new Datanode.
 * The test strategy :
 * Bring up Cluster with 3 DataNodes
 * Create a file of replication factor 3
 * Corrupt one replica of a block of the file
 * Verify that there are still 2 good replicas and 1 corrupt replica
 * (corrupt replica should not be removed since number of good replicas
 * (2) is less than replication factor (3))
 * Start a new data node
 * Verify that a new replica is created and the corrupt replica is
 * removed.
 */
@Test public void testByAddingAnExtraDataNode() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Keep the fourth datanode offline until it is needed as the re-replication
// target.
DataNodeProperties dnPropsFourth=cluster.stopDataNode(3);
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)3,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
corruptBlock(cluster,fs,fileName,0,block);
// Only 2 good replicas remain; the corrupt one stays since 2 < 3.
DFSTestUtil.waitReplication(fs,fileName,(short)2);
assertEquals(2,countReplicas(namesystem,block).liveReplicas());
assertEquals(1,countReplicas(namesystem,block).corruptReplicas());
// Bring the fourth datanode back: a third good replica is created there,
// after which the corrupt replica can be removed.
cluster.restartDataNode(dnPropsFourth);
DFSTestUtil.waitReplication(fs,fileName,(short)3);
assertEquals(3,countReplicas(namesystem,block).liveReplicas());
assertEquals(0,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. The above condition should hold
 * true as long as there is one good replica. This test verifies that.
 * The test strategy :
 * Bring up Cluster with 2 DataNodes
 * Create a file of replication factor 2
 * Corrupt one replica of a block of the file
 * Verify that there is one good replica and 1 corrupt replica
 * (corrupt replica should not be removed since number of good
 * replicas (1) is less than replication factor (2)).
 * Set the replication factor to 1
 * Verify that the corrupt replica is removed.
 * (corrupt replica should be removed since number of good
 * replicas (1) is equal to replication factor (1))
 */
@Test(timeout=20000) public void testWithReplicationFactorAsOne() throws Exception {
Configuration conf=new HdfsConfiguration();
// Frequent block reports so corruption is detected quickly.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2));
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
try {
final Path fileName=new Path("/foo1");
DFSTestUtil.createFile(fs,fileName,2,(short)2,0L);
DFSTestUtil.waitReplication(fs,fileName,(short)2);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,fileName);
corruptBlock(cluster,fs,fileName,0,block);
DFSTestUtil.waitReplication(fs,fileName,(short)1);
// 1 good replica < replication factor 2: corrupt replica must remain.
assertEquals(1,countReplicas(namesystem,block).liveReplicas());
assertEquals(1,countReplicas(namesystem,block).corruptReplicas());
namesystem.setReplication(fileName.toString(),(short)1);
// Poll for up to ~10s for the corrupt replica to be invalidated.
for (int i=0; i < 10; i++) {
try {
Thread.sleep(1000);
}
catch ( InterruptedException ignored) {
}
if (countReplicas(namesystem,block).corruptReplicas() == 0) {
break;
}
}
// 1 good replica == replication factor 1: corrupt replica must be gone.
assertEquals(1,countReplicas(namesystem,block).liveReplicas());
assertEquals(0,countReplicas(namesystem,block).corruptReplicas());
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verify that a saveNamespace command brings faulty directories
 * in fs.name.dir and fs.edit.dir back online.
 *
 * Strategy: remove all permissions from the first storage directory so the
 * first saveNamespace marks it bad, restore the permissions and check that
 * a second saveNamespace re-adopts it, then reload the namespace from disk
 * to prove the saved image is intact.
 */
@Test(timeout=30000) public void testReinsertnamedirsInSavenamespace() throws Exception {
Configuration conf=getConf();
// Enable automatic restore of failed storage directories.
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,true);
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn=FSNamesystem.loadFromDisk(conf);
FSImage originalImage=fsn.getFSImage();
NNStorage storage=originalImage.getStorage();
FSImage spyImage=spy(originalImage);
Whitebox.setInternalState(fsn,"fsImage",spyImage);
FileSystem fs=FileSystem.getLocal(conf);
File rootDir=storage.getStorageDir(0).getRoot();
Path rootPath=new Path(rootDir.getPath(),"current");
final FsPermission permissionNone=new FsPermission((short)0);
final FsPermission permissionAll=new FsPermission(FsAction.ALL,FsAction.READ_EXECUTE,FsAction.READ_EXECUTE);
// Make the first storage directory unwritable so the next save fails on it.
fs.setPermission(rootPath,permissionNone);
try {
doAnEdit(fsn,1);
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
LOG.info("Doing the first savenamespace.");
fsn.saveNamespace();
LOG.info("First savenamespace successful.");
assertEquals("Savenamespace should have marked one directory as bad." + " But found " + storage.getRemovedStorageDirs().size() + " bad directories.",1,storage.getRemovedStorageDirs().size());
// Restore permissions; the next save should re-adopt the directory.
fs.setPermission(rootPath,permissionAll);
LOG.info("Doing the second savenamespace.");
fsn.saveNamespace();
LOG.info("Second savenamespace successful.");
assertEquals("Savenamespace should have been successful in removing " + " bad directories from Image." + " But found " + storage.getRemovedStorageDirs().size() + " bad directories.",0,storage.getRemovedStorageDirs().size());
LOG.info("Shutting down fsimage.");
originalImage.close();
fsn.close();
fsn=null;
// Reload from disk to verify the saved image and edits are usable.
LOG.info("Loading new FSmage from disk.");
fsn=FSNamesystem.loadFromDisk(conf);
LOG.info("Checking reloaded image.");
checkEditExists(fsn,1);
LOG.info("Reloaded image is good.");
}
finally {
// Always restore permissions so cleanup can delete the directory.
if (rootDir.exists()) {
fs.setPermission(rootPath,permissionAll);
}
if (fsn != null) {
try {
fsn.close();
}
catch ( Throwable t) {
LOG.fatal("Failed to shut down",t);
}
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that the edit log's last-written transaction ID advances as
 * expected across an edit, saveNamespace, close, and reload from disk.
 *
 * NOTE(review): the expected values (1,2,4,5,6) encode how many txids each
 * operation consumes (saveNamespace appears to write two, close and reload
 * one each) -- confirm against FSEditLog if these ever drift.
 */
@Test(timeout=30000) public void testTxIdPersistence() throws Exception {
Configuration conf=getConf();
NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn=FSNamesystem.loadFromDisk(conf);
try {
// Loading the freshly formatted namespace starts at txid 1.
assertEquals(1,fsn.getEditLog().getLastWrittenTxId());
doAnEdit(fsn,1);
assertEquals(2,fsn.getEditLog().getLastWrittenTxId());
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fsn.saveNamespace();
assertEquals(4,fsn.getEditLog().getLastWrittenTxId());
fsn.getFSImage().close();
fsn.close();
assertEquals(5,fsn.getEditLog().getLastWrittenTxId());
fsn=null;
// Reloading from disk advances the txid once more.
fsn=FSNamesystem.loadFromDisk(conf);
assertEquals(6,fsn.getEditLog().getLastWrittenTxId());
}
finally {
if (fsn != null) {
fsn.close();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify that the SecondaryNameNode MXBean exposes the same checkpoint
 * directory lists as the SecondaryNameNode instance itself.
 */
@Test public void testSecondaryWebUi() throws IOException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");
String[] checkpointDir=(String[])mbs.getAttribute(mxbeanName,"CheckpointDirectories");
// JUnit convention: expected value (from the SNN itself) goes first.
Assert.assertArrayEquals(snn.getCheckpointDirectories(),checkpointDir);
String[] checkpointEditlogDir=(String[])mbs.getAttribute(mxbeanName,"CheckpointEditlogDirectories");
Assert.assertArrayEquals(snn.getCheckpointEditlogDirectories(),checkpointEditlogDir);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Run a Kerberos-authenticated client against a secure MiniDFSCluster and
 * verify that permission checks and the authentication method behave as
 * expected for a non-superuser principal (user1).
 */
@Test public void testName() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
try {
String keyTabDir=System.getProperty("kdc.resource.dir") + "/keytabs";
String nn1KeytabPath=keyTabDir + "/nn1.keytab";
String user1KeyTabPath=keyTabDir + "/user1.keytab";
Configuration conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,"nn1/localhost@EXAMPLE.COM");
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,nn1KeytabPath);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
final MiniDFSCluster clusterRef=cluster;
cluster.waitActive();
// As the cluster superuser, create a world-writable /tmp.
FileSystem fsForCurrentUser=cluster.getFileSystem();
fsForCurrentUser.mkdirs(new Path("/tmp"));
fsForCurrentUser.setPermission(new Path("/tmp"),new FsPermission((short)511));
// Log in as the unprivileged user and obtain a FileSystem under its UGI.
// The type parameter is required: a raw action makes doAs return Object.
UserGroupInformation ugi=UserGroupInformation.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",user1KeyTabPath);
FileSystem fs=ugi.doAs(new PrivilegedExceptionAction<FileSystem>(){
@Override public FileSystem run() throws Exception {
return clusterRef.getFileSystem();
}
}
);
// user1 has no write access to the root directory.
try {
Path p=new Path("/users");
fs.mkdirs(p);
fail("user1 must not be allowed to write in /");
}
catch ( IOException expected) {
}
// ...but can write under the world-writable /tmp.
Path p=new Path("/tmp/alpha");
fs.mkdirs(p);
assertNotNull(fs.listStatus(p));
assertEquals(AuthenticationMethod.KERBEROS,ugi.getAuthenticationMethod());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a secure NameNode enforces HDFS permissions for a
 * Kerberos-authenticated non-superuser: writing to / is denied, writing to
 * a world-writable /tmp succeeds, and the UGI reports KERBEROS auth.
 */
@Test public void testSecureNameNode() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
try {
// Principals and keytabs are supplied by the test harness as sysprops.
String nnPrincipal=System.getProperty("dfs.namenode.kerberos.principal");
String nnSpnegoPrincipal=System.getProperty("dfs.namenode.kerberos.internal.spnego.principal");
String nnKeyTab=System.getProperty("dfs.namenode.keytab.file");
assertNotNull("NameNode principal was not specified",nnPrincipal);
assertNotNull("NameNode SPNEGO principal was not specified",nnSpnegoPrincipal);
assertNotNull("NameNode keytab was not specified",nnKeyTab);
Configuration conf=new HdfsConfiguration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,nnPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,nnSpnegoPrincipal);
conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,nnKeyTab);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
final MiniDFSCluster clusterRef=cluster;
cluster.waitActive();
// As the cluster superuser, create a world-writable /tmp.
FileSystem fsForCurrentUser=cluster.getFileSystem();
fsForCurrentUser.mkdirs(new Path("/tmp"));
fsForCurrentUser.setPermission(new Path("/tmp"),new FsPermission((short)511));
String userPrincipal=System.getProperty("user.principal");
String userKeyTab=System.getProperty("user.keytab");
assertNotNull("User principal was not specified",userPrincipal);
assertNotNull("User keytab was not specified",userKeyTab);
// Log in as the unprivileged user and obtain a FileSystem under its UGI.
// The type parameter is required: a raw action makes doAs return Object.
UserGroupInformation ugi=UserGroupInformation.loginUserFromKeytabAndReturnUGI(userPrincipal,userKeyTab);
FileSystem fs=ugi.doAs(new PrivilegedExceptionAction<FileSystem>(){
@Override public FileSystem run() throws Exception {
return clusterRef.getFileSystem();
}
}
);
// The user has no write access to the root directory.
try {
Path p=new Path("/users");
fs.mkdirs(p);
fail("User must not be allowed to write in /");
}
catch ( IOException expected) {
}
// ...but can write under the world-writable /tmp.
Path p=new Path("/tmp/alpha");
fs.mkdirs(p);
assertNotNull(fs.listStatus(p));
assertEquals(AuthenticationMethod.KERBEROS,ugi.getAuthenticationMethod());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests transaction logging in dfs.
 *
 * Runs NUM_THREADS concurrent transaction threads against the namesystem,
 * then replays every finalized edits file and checks that exactly the
 * expected number of transactions was logged.
 */
@Test public void testEditLog() throws IOException {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
FileSystem fileSys=null;
try {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys=cluster.getFileSystem();
final FSNamesystem namesystem=cluster.getNamesystem();
// Print the name directories for debugging.
for (java.net.URI nameDirUri : cluster.getNameDirs(0)) {
File dir=new File(nameDirUri.getPath());
System.out.println(dir);
}
FSImage fsimage=namesystem.getFSImage();
FSEditLog editLog=fsimage.getEditLog();
// Small output buffer forces frequent flushes under concurrency.
editLog.setOutputBufferCapacity(2048);
Thread[] threadId=new Thread[NUM_THREADS];
for (int i=0; i < NUM_THREADS; i++) {
Transactions trans=new Transactions(namesystem,NUM_TRANSACTIONS);
threadId[i]=new Thread(trans,"TransactionThread-" + i);
threadId[i].start();
}
for (int i=0; i < NUM_THREADS; i++) {
try {
threadId[i].join();
}
catch ( InterruptedException e) {
// Retry the join on the same thread if interrupted.
i--;
}
}
editLog.close();
namesystem.getDelegationTokenSecretManager().stopThreads();
int numKeys=namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
// numKeys delegation-token key ops plus 2 bookkeeping txns -- confirm
// against FSEditLog segment accounting if this changes.
int expectedTransactions=NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + 2;
for ( StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
File editFile=NNStorage.getFinalizedEditsFile(sd,1,1 + expectedTransactions - 1);
System.out.println("Verifying file: " + editFile);
FSEditLogLoader loader=new FSEditLogLoader(namesystem,0);
long numEdits=loader.loadFSEdits(new EditLogFileInputStream(editFile),1);
assertEquals("Verification for " + editFile,expectedTransactions,numEdits);
}
}
finally {
if (fileSys != null) fileSys.close();
if (cluster != null) cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher
/**
 * Test that collisions in the block ID space are handled gracefully.
 * @throws IOException
 */
@Test public void testTriggerBlockIdCollision() throws IOException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
FSNamesystem fsn=cluster.getNamesystem();
final int blockCount=10;
// Allocate blockCount block IDs by writing the first file.
Path path1=new Path("testBlockIdCollisionDetection_file1.dat");
DFSTestUtil.createFile(fs,path1,IO_SIZE,BLOCK_SIZE * blockCount,BLOCK_SIZE,REPLICATION,SEED);
List<LocatedBlock> blocks1=DFSTestUtil.getAllBlocks(fs,path1);
// Rewind the generator so subsequent allocations collide with IDs that
// are already in use by file1.
SequentialBlockIdGenerator blockIdGenerator=fsn.getBlockIdGenerator();
blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
Path path2=new Path("testBlockIdCollisionDetection_file2.dat");
DFSTestUtil.createFile(fs,path2,IO_SIZE,BLOCK_SIZE * blockCount,BLOCK_SIZE,REPLICATION,SEED);
List<LocatedBlock> blocks2=DFSTestUtil.getAllBlocks(fs,path2);
assertThat(blocks2.size(),is(blockCount));
// Collisions must be skipped: file2's first ID continues right after
// file1's last allocated ID.
assertThat(blocks2.get(0).getBlock().getBlockId(),is(blocks1.get(9).getBlock().getBlockId() + 1));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier ConditionMatcher
/**
 * Test that the generation stamp for legacy and new blocks is updated
 * as expected.
 * @throws IOException
 */
@Test public void testGenerationStampUpdate() throws IOException {
// Stub the namesystem so nextGenerationStamp() runs for real while its
// collaborators are mocked out.
final long expectedLegacyStamp=5000;
final long expectedNewStamp=20000;
FSNamesystem mockFsn=mock(FSNamesystem.class);
FSEditLog mockEditLog=mock(FSEditLog.class);
when(mockFsn.getNextGenerationStampV1()).thenReturn(expectedLegacyStamp);
when(mockFsn.getNextGenerationStampV2()).thenReturn(expectedNewStamp);
when(mockFsn.nextGenerationStamp(anyBoolean())).thenCallRealMethod();
when(mockFsn.hasWriteLock()).thenReturn(true);
when(mockFsn.getEditLog()).thenReturn(mockEditLog);
// legacy=true selects the V1 stamp, legacy=false the V2 stamp.
assertThat(mockFsn.nextGenerationStamp(true),is(expectedLegacyStamp));
assertThat(mockFsn.nextGenerationStamp(false),is(expectedNewStamp));
}
InternalCallVerifier ConditionMatcher
/**
 * Test that the block type (legacy or not) can be correctly detected
 * based on its generation stamp.
 * @throws IOException
 */
@Test public void testBlockTypeDetection() throws IOException {
// Any stamp at or below this limit marks a block as "legacy".
final long legacyStampLimit=10000;
FSNamesystem mockFsn=mock(FSNamesystem.class);
when(mockFsn.getGenerationStampV1Limit()).thenReturn(legacyStampLimit);
when(mockFsn.isLegacyBlock(any(Block.class))).thenCallRealMethod();
// A block stamped below the limit is classified as legacy.
Block blockBelowLimit=spy(new Block());
when(blockBelowLimit.getGenerationStamp()).thenReturn(legacyStampLimit / 2);
assertThat(mockFsn.isLegacyBlock(blockBelowLimit),is(true));
// A block stamped above the limit is not.
Block blockAboveLimit=spy(new Block());
when(blockAboveLimit.getGenerationStamp()).thenReturn(legacyStampLimit + 1);
assertThat(mockFsn.isLegacyBlock(blockAboveLimit),is(false));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier ConditionMatcher
/**
 * Test that block IDs are generated sequentially.
 * @throws IOException
 */
@Test public void testBlockIdGeneration() throws IOException {
Configuration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
FileSystem fs=cluster.getFileSystem();
// Write a 10-block file and verify the IDs form a consecutive run.
Path path=new Path("testBlockIdGeneration.dat");
DFSTestUtil.createFile(fs,path,IO_SIZE,BLOCK_SIZE * 10,BLOCK_SIZE,REPLICATION,SEED);
List<LocatedBlock> blocks=DFSTestUtil.getAllBlocks(fs,path);
LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
long nextBlockExpectedId=blocks.get(0).getBlock().getBlockId() + 1;
for (int i=1; i < blocks.size(); ++i) {
long nextBlockId=blocks.get(i).getBlock().getBlockId();
LOG.info("Block" + i + " id is "+ nextBlockId);
assertThat(nextBlockId,is(nextBlockExpectedId));
++nextBlockExpectedId;
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file while modifying file after snapshot.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterModification() throws Exception {
// Resolve the live path before snapshotting and remember its mtime.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertEquals(inodes[components.length - 1].getFullPathName(),file1.toString());
final long modTime=inodes[inodes.length - 1].getModificationTime();
// Snapshot, then modify the file so live and snapshot copies diverge.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s3");
DFSTestUtil.appendFile(hdfs,file1,"the content for appending");
// The snapshot path must still resolve to the pre-append state.
String snapshotPath=sub1.toString() + "/.snapshot/s3/file1";
names=INode.getPathNames(snapshotPath);
components=INode.getPathComponents(names);
INodesInPath ssNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] ssInodes=ssNodesInPath.getINodes();
// The ".snapshot" component does not occupy an inode slot.
assertEquals(ssInodes.length,components.length - 1);
final Snapshot s3=getSnapshot(ssNodesInPath,"s3");
assertSnapshot(ssNodesInPath,true,s3,3);
INode snapshotFileNode=ssInodes[ssInodes.length - 1];
assertEquals(snapshotFileNode.getLocalName(),file1.getName());
assertTrue(snapshotFileNode.asFile().isWithSnapshot());
// The snapshot view keeps the original modification time.
assertEquals(modTime,snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
// The live path resolves normally and reflects the append's new mtime.
names=INode.getPathNames(file1.toString());
components=INode.getPathComponents(names);
INodesInPath newNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
assertSnapshot(newNodesInPath,false,s3,-1);
INode[] newInodes=newNodesInPath.getINodes();
assertEquals(newInodes.length,components.length);
final int last=components.length - 1;
assertEquals(newInodes[last].getFullPathName(),file1.toString());
Assert.assertFalse(modTime == newInodes[last].getModificationTime());
// Clean up so subsequent tests see an un-snapshottable sub1.
hdfs.deleteSnapshot(sub1,"s3");
hdfs.disallowSnapshot(sub1);
}
InternalCallVerifier BooleanVerifier
/**
 * Test allow-snapshot operation.
 */
@Test(timeout=15000) public void testAllowSnapshot() throws Exception {
final String dirStr=sub1.toString();
final Path dirPath=new Path(dirStr);
// Initially the directory must not be snapshottable.
Assert.assertFalse(fsdir.getINode(dirStr).asDirectory().isSnapshottable());
// Allowing snapshots flips the flag on.
hdfs.allowSnapshot(dirPath);
Assert.assertTrue(fsdir.getINode(dirStr).asDirectory().isSnapshottable());
// Disallowing snapshots flips it back off.
hdfs.disallowSnapshot(dirPath);
Assert.assertFalse(fsdir.getINode(dirStr).asDirectory().isSnapshottable());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for normal (non-snapshot) file.
 */
@Test(timeout=15000) public void testNonSnapshotPathINodes() throws Exception {
// Full resolution: one inode per path component, no snapshot involved.
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertSnapshot(nodesInPath,false,null,-1);
assertTrue("file1=" + file1 + ", nodesInPath="+ nodesInPath,inodes[components.length - 1] != null);
assertEquals(inodes[components.length - 1].getFullPathName(),file1.toString());
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Limited resolution (1 inode): only the last path inode is returned.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,1,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,1);
assertSnapshot(nodesInPath,false,null,-1);
assertEquals(inodes[0].getFullPathName(),file1.toString());
// Limited resolution (2 inodes): last two inodes, parent first.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,2,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,2);
assertSnapshot(nodesInPath,false,null,-1);
assertEquals(inodes[1].getFullPathName(),file1.toString());
assertEquals(inodes[0].getFullPathName(),sub1.toString());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file while adding a new file after snapshot.
 */
@Test(timeout=15000) public void testSnapshotPathINodesWithAddedFile() throws Exception {
// Snapshot first, then create file3 so it exists only in the live tree.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s4");
final Path file3=new Path(sub1,"file3");
DFSTestUtil.createFile(hdfs,file3,1024,REPLICATION,seed);
{
// Resolving file3 through the snapshot must yield a null last inode:
// the file did not exist when s4 was taken.
String snapshotPath=sub1.toString() + "/.snapshot/s4/file3";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// The ".snapshot" component does not occupy an inode slot.
assertEquals(inodes.length,components.length - 1);
assertEquals(nodesInPath.getNumNonNull(),components.length - 2);
s4=getSnapshot(nodesInPath,"s4");
assertSnapshot(nodesInPath,true,s4,3);
assertNull(inodes[inodes.length - 1]);
}
// The live path resolves completely.
String[] names=INode.getPathNames(file3.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertSnapshot(nodesInPath,false,s4,-1);
assertEquals(inodes[components.length - 1].getFullPathName(),file3.toString());
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Clean up for subsequent tests.
hdfs.deleteSnapshot(sub1,"s4");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file.
 */
@Test(timeout=15000) public void testSnapshotPathINodes() throws Exception {
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s1");
// Full resolution of a path through ".snapshot": the ".snapshot"
// component does not occupy an inode slot, hence length - 1.
String snapshotPath=sub1.toString() + "/.snapshot/s1/file1";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length - 1);
final Snapshot snapshot=getSnapshot(nodesInPath,"s1");
assertSnapshot(nodesInPath,true,snapshot,3);
INode snapshotFileNode=inodes[inodes.length - 1];
assertINodeFile(snapshotFileNode,file1);
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Limited resolution: only the last inode.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,1,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,1);
assertSnapshot(nodesInPath,true,snapshot,-1);
assertINodeFile(nodesInPath.getLastINode(),file1);
// Limited resolution: last two inodes.
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,2,false);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,2);
assertSnapshot(nodesInPath,true,snapshot,0);
assertINodeFile(nodesInPath.getLastINode(),file1);
// Resolving the bare ".snapshot" directory lands on sub1 itself.
String dotSnapshotPath=sub1.toString() + "/.snapshot";
names=INode.getPathNames(dotSnapshotPath);
components=INode.getPathComponents(names);
nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length - 1);
assertSnapshot(nodesInPath,true,snapshot,-1);
final INode last=nodesInPath.getLastINode();
assertEquals(last.getFullPathName(),sub1.toString());
assertFalse(last instanceof INodeFile);
// A ".snapshot" under a non-existent directory must raise
// FileNotFoundException rather than resolve.
String[] invalidPathComponent={"invalidDir","foo",".snapshot","bar"};
Path invalidPath=new Path(invalidPathComponent[0]);
for (int i=1; i < invalidPathComponent.length; i++) {
invalidPath=new Path(invalidPath,invalidPathComponent[i]);
try {
hdfs.getFileStatus(invalidPath);
Assert.fail();
}
catch ( FileNotFoundException fnfe) {
System.out.println("The exception is expected: " + fnfe);
}
}
// Clean up for subsequent tests.
hdfs.deleteSnapshot(sub1,"s1");
hdfs.disallowSnapshot(sub1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for snapshot file after deleting the original file.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterDeletion() throws Exception {
// Snapshot, then delete the live file: only the snapshot copy remains.
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,"s2");
hdfs.delete(file1,false);
final Snapshot snapshot;
{
// The snapshot path still resolves to the deleted file's inode.
String snapshotPath=sub1.toString() + "/.snapshot/s2/file1";
String[] names=INode.getPathNames(snapshotPath);
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
// The ".snapshot" component does not occupy an inode slot.
assertEquals(inodes.length,components.length - 1);
snapshot=getSnapshot(nodesInPath,"s2");
assertSnapshot(nodesInPath,true,snapshot,3);
final INode inode=inodes[inodes.length - 1];
assertEquals(file1.getName(),inode.getLocalName());
assertTrue(inode.asFile().isWithSnapshot());
}
// The live path now resolves with a null last inode (file is gone).
String[] names=INode.getPathNames(file1.toString());
byte[][] components=INode.getPathComponents(names);
INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
INode[] inodes=nodesInPath.getINodes();
assertEquals(inodes.length,components.length);
assertEquals(nodesInPath.getNumNonNull(),components.length - 1);
assertSnapshot(nodesInPath,false,snapshot,-1);
assertNull(inodes[components.length - 1]);
assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
// Clean up for subsequent tests.
hdfs.deleteSnapshot(sub1,"s2");
hdfs.disallowSnapshot(sub1);
}
UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * secnn-7
 * secondary node copies fsimage and edits into correct separate directories.
 * @throws IOException
 */
@Test public void testSNNStartup() throws IOException {
LOG.info("--starting SecondNN startup test");
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,fileAsURI(new File(hdfsDir,"name")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,fileAsURI(new File(hdfsDir,"name")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,fileAsURI(new File(hdfsDir,"chkpt_edits")).toString());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,fileAsURI(new File(hdfsDir,"chkpt")).toString());
LOG.info("--starting NN ");
MiniDFSCluster cluster=null;
SecondaryNameNode sn=null;
NameNode nn=null;
try {
cluster=new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build();
cluster.waitActive();
nn=cluster.getNameNode();
assertNotNull(nn);
LOG.info("--starting SecondNN");
sn=new SecondaryNameNode(config);
assertNotNull(sn);
LOG.info("--doing checkpoint");
sn.doCheckpoint();
LOG.info("--done checkpoint");
FSImage image=nn.getFSImage();
StorageDirectory sd=image.getStorage().getStorageDir(0);
// The NN side uses a single combined image+edits directory.
assertEquals(sd.getStorageDirType(),NameNodeDirType.IMAGE_AND_EDITS);
File imf=NNStorage.getStorageFile(sd,NameNodeFile.IMAGE,0);
File edf=NNStorage.getStorageFile(sd,NameNodeFile.EDITS,0);
LOG.info("--image file " + imf.getAbsolutePath() + "; len = "+ imf.length());
LOG.info("--edits file " + edf.getAbsolutePath() + "; len = "+ edf.length());
// The checkpoint image must live in the separate checkpoint directories.
FSImage chkpImage=sn.getFSImage();
verifyDifferentDirs(chkpImage,imf.length(),edf.length());
}
catch ( IOException e) {
// Log first: fail() throws AssertionError, so anything placed after it
// (including the old unreachable "throw e") would never execute.
System.err.println("checkpoint failed");
fail(StringUtils.stringifyException(e));
}
finally {
if (sn != null) sn.shutdown();
if (cluster != null) cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verify NameNode startup validation of the xattr configuration keys:
 * negative max-size and negative per-inode limits must be rejected, and a
 * max size of 0 is accepted as "unlimited" (with a log message).
 */
@Test(timeout=120000) public void testXattrConfiguration() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
// A negative maximum xattr size must be rejected at startup.
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,-1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fail("Expected exception with negative xattr size");
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set a negative value for the maximum size of an xattr",e);
}
finally {
// Restore the default so the next sub-case starts clean.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
// A negative xattrs-per-inode limit must also be rejected.
try {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,-1);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
fail("Expected exception with negative # xattrs per inode");
}
catch ( IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set a negative limit on the number of xattrs per inode",e);
}
finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
// Size 0 means "unlimited" and must be accepted (and logged), not rejected.
try {
final LogVerificationAppender appender=new LogVerificationAppender();
final Logger logger=Logger.getRootLogger();
logger.addAppender(appender);
int count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
assertEquals("Expected no messages about unlimited xattr size",0,count);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,0);
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
// The message is expected exactly twice -- presumably emitted by two
// startup components; confirm against NameNode logging if this drifts.
count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
assertEquals("Expected unlimited xattr size",2,count);
}
finally {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verify that the fsimage can be written and re-read through every
 * combination of compressed/uncompressed storage, including switching the
 * compression codec between saves.
 */
@Test public void testCompression() throws IOException {
LOG.info("Test compressing image.");
Configuration conf=new Configuration();
FileSystem.setDefaultUri(conf,"hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,"127.0.0.1:0");
File base_dir=new File(PathUtils.getTestDir(getClass()),"dfs/");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,new File(base_dir,"name").getPath());
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
DFSTestUtil.formatNameNode(conf);
LOG.info("Create an uncompressed fsimage");
NameNode namenode=new NameNode(conf);
namenode.getNamesystem().mkdirs("/test",new PermissionStatus("hairong",null,FsPermission.getDefault()),true);
NamenodeProtocols nnRpc=namenode.getRpcServer();
assertTrue(nnRpc.getFileInfo("/test").isDir());
nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
nnRpc.saveNamespace();
namenode.stop();
namenode.join();
// Each checkNameSpace() call reloads the image, verifies the namespace
// contents, and saves it again under the current compression settings.
LOG.info("Read an uncompressed image and store it compressed using default codec.");
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,true);
checkNameSpace(conf);
LOG.info("Read a compressed image and store it using a different codec.");
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,"org.apache.hadoop.io.compress.GzipCodec");
checkNameSpace(conf);
LOG.info("Read a compressed image and store it as uncompressed.");
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false);
checkNameSpace(conf);
LOG.info("Read an uncompressed image and store it as uncompressed.");
checkNameSpace(conf);
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * This test tests hosts include list contains host names. After namenode
 * restarts, the still alive datanodes should not have any trouble in getting
 * registrant again.
 */
@Test public void testNNRestart() throws IOException, InterruptedException {
MiniDFSCluster cluster=null;
FileSystem localFileSys;
Path hostsFile;
Path excludeFile;
int HEARTBEAT_INTERVAL=1;
localFileSys=FileSystem.getLocal(config);
Path workingDir=localFileSys.getWorkingDirectory();
Path dir=new Path(workingDir,"build/test/data/work-dir/restartnn");
hostsFile=new Path(dir,"hosts");
excludeFile=new Path(dir,"exclude");
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE,excludeFile.toUri().getPath());
writeConfigFile(localFileSys,excludeFile,null);
config.set(DFSConfigKeys.DFS_HOSTS,hostsFile.toUri().getPath());
// Put the loopback host name into the include file so the datanode is
// admitted by host name rather than by IP.
ArrayList<String> list=new ArrayList<String>();
byte[] b={127,0,0,1};
InetAddress inetAddress=InetAddress.getByAddress(b);
list.add(inetAddress.getHostName());
writeConfigFile(localFileSys,hostsFile,list);
int numDatanodes=1;
try {
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).setupHostsFile(true).build();
cluster.waitActive();
cluster.restartNameNode();
NamenodeProtocols nn=cluster.getNameNodeRpc();
assertNotNull(nn);
assertTrue(cluster.isDataNodeUp());
// Poll the live datanode report until all datanodes re-register.
DatanodeInfo[] info=nn.getDatanodeReport(DatanodeReportType.LIVE);
for (int i=0; i < 5 && info.length != numDatanodes; i++) {
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
}
assertEquals("Number of live nodes should be " + numDatanodes,numDatanodes,info.length);
}
catch ( IOException e) {
// fail() throws AssertionError; the old "throw e" after it was unreachable.
fail(StringUtils.stringifyException(e));
}
finally {
cleanupFile(localFileSys,excludeFile.getParent());
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier
/**
 * Upgrade from release 0.20.204 to a Federation-capable layout with no
 * clusterid supplied on the command line: a fresh id carrying the "CID"
 * prefix must be generated.
 */
@Test public void testStartupOptUpgradeFrom204() throws Exception {
  layoutVersion = Feature.RESERVED_REL20_204.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  final String generatedId = storage.getClusterID();
  assertTrue("Clusterid should start with CID", generatedId.startsWith("CID"));
}
InternalCallVerifier EqualityVerifier
/**
 * Upgrade from release 0.22 invoked as "-upgrade -clusterid cid": the
 * user-supplied clusterid must be adopted as-is.
 */
@Test public void testStartupOptUpgradeFrom22WithCID() throws Exception {
  final String suppliedId = "cid";
  startOpt.setClusterId(suppliedId);
  layoutVersion = Feature.RESERVED_REL22.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the given clusterid", suppliedId, storage.getClusterID());
}
InternalCallVerifier EqualityVerifier
/**
 * Federation-to-Federation upgrade with a mismatching "-clusterid": the id
 * already recorded in storage wins and the user-supplied one is ignored.
 */
@Test public void testStartupOptUpgradeFromFederationWithWrongCID() throws Exception {
  final String existingId = "currentcid";
  startOpt.setClusterId("wrong-cid");
  storage.setClusterID(existingId);
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one", existingId, storage.getClusterID());
}
InternalCallVerifier EqualityVerifier
/**
 * Federation-to-Federation upgrade with no "-clusterid" argument: the id
 * already recorded in storage is simply reused.
 */
@Test public void testStartupOptUpgradeFromFederation() throws Exception {
  final String existingId = "currentcid";
  storage.setClusterID(existingId);
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one", existingId, storage.getClusterID());
}
InternalCallVerifier EqualityVerifier
/**
 * Federation-to-Federation upgrade where the "-clusterid" argument equals
 * the id already recorded in storage: the existing id is kept.
 */
@Test public void testStartupOptUpgradeFromFederationWithCID() throws Exception {
  final String existingId = "currentcid";
  startOpt.setClusterId(existingId);
  storage.setClusterID(existingId);
  layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
  assertEquals("Clusterid should match with the existing one", existingId, storage.getClusterID());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test dfsadmin -restoreFailedStorage command: verifies the flag defaults
 * to true, toggles it false then true via the CLI, and checks the output
 * of the "check" sub-command.
 * @throws Exception
 */
@Test public void testDfsAdminCmd() throws Exception {
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).manageNameDfsDirs(false).build();
  cluster.waitActive();
  try {
    FSImage fsi = cluster.getNameNode().getFSImage();
    boolean restore = fsi.getStorage().getRestoreFailedStorage();
    LOG.info("Restore is " + restore);
    // Was assertEquals(restore, true) — actual/expected reversed; the
    // idiomatic assertion for a boolean is assertTrue.
    assertTrue(restore);
    String cmd = "-fs NAMENODE -restoreFailedStorage false";
    String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
    CommandExecutor executor = new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
    executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertFalse("After set true call restore is " + restore, restore);
    cmd = "-fs NAMENODE -restoreFailedStorage true";
    executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertTrue("After set false call restore is " + restore, restore);
    cmd = "-fs NAMENODE -restoreFailedStorage check";
    CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertTrue("After check call restore is " + restore, restore);
    // Strings are immutable: the original called trim() and discarded the
    // result. Use the trimmed value.
    String commandOutput = cmdResult.getCommandOutput().trim();
    assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
  } finally {
    cluster.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * 1. create DFS cluster with 3 storage directories
 * - 2 EDITS_IMAGE(name1, name2), 1 EDITS(name3)
 * 2. create a file
 * 3. corrupt/disable name2 and name3 by removing rwx permission
 * 4. run doCheckpoint
 * - will fail on removed dirs (which invalidates them)
 * 5. write another file
 * 6. check there is only one healthy storage dir
 * 7. run doCheckpoint - recover should fail but checkpoint should succeed
 * 8. check there is still only one healthy storage dir
 * 9. restore the access permission for name2 and name 3, run checkpoint again
 * 10.verify there are 3 healthy storage dirs.
 */
@Test public void testStorageRestoreFailure() throws Exception {
  SecondaryNameNode secondary = null;
  // On Windows chmod must target the "current" subdirectory; elsewhere
  // the storage root itself is used.
  String nameDir2 = Shell.WINDOWS ? (new File(path2, "current").getAbsolutePath()) : path2.toString();
  String nameDir3 = Shell.WINDOWS ? (new File(path3, "current").getAbsolutePath()) : path3.toString();
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = new SecondaryNameNode(config);
    printStorages(cluster.getNameNode().getFSImage());
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/", "test");
    assertTrue(fs.mkdirs(path));
    // Disable name2 and name3 by stripping all permissions.
    assertTrue(FileUtil.chmod(nameDir2, "000") == 0);
    assertTrue(FileUtil.chmod(nameDir3, "000") == 0);
    secondary.doCheckpoint();
    printStorages(cluster.getNameNode().getFSImage());
    path = new Path("/", "test1");
    assertTrue(fs.mkdirs(path));
    // Use JUnit assertions rather than the Java `assert` keyword so the
    // checks also run when the JVM is started without -ea.
    assertEquals(1, cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
    secondary.doCheckpoint();
    assertEquals(1, cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
    // Restore access; the next checkpoint should bring all 3 dirs back.
    assertTrue(FileUtil.chmod(nameDir2, "755") == 0);
    assertTrue(FileUtil.chmod(nameDir3, "755") == 0);
    secondary.doCheckpoint();
    assertEquals(3, cluster.getNameNode().getFSImage().getStorage().getNumStorageDirs());
  } finally {
    if (path2.exists()) {
      FileUtil.chmod(nameDir2, "755");
    }
    if (path3.exists()) {
      FileUtil.chmod(nameDir3, "755");
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    if (secondary != null) {
      secondary.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Simulates interleaved checkpointing by two 2NNs after a storage directory
 * has been taken offline. The first checkpoint brings the directory back
 * online, but with no valid contents; when the second checkpoint runs, the
 * NN must not serve the image or edits from that restored (empty) dir.
 */
@Test public void testMultipleSecondaryCheckpoint() throws IOException {
  SecondaryNameNode secondaryNn = null;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondaryNn = new SecondaryNameNode(config);
    FSImage image = cluster.getNameNode().getFSImage();
    printStorages(image);
    FileSystem dfs = cluster.getFileSystem();
    Path dir = new Path("/", "test");
    assertTrue(dfs.mkdirs(dir));
    printStorages(image);
    // Take path1 offline, then roll the edit log before checkpointing.
    invalidateStorage(image, ImmutableSet.of(path1));
    cluster.getNameNodeRpc().rollEditLog();
    printStorages(image);
    secondaryNn.doCheckpoint();
    printStorages(image);
    assertTrue("path exists before restart", dfs.exists(dir));
    secondaryNn.shutdown();
    // The namespace must survive an NN restart despite the empty dir.
    cluster.restartNameNode();
    assertTrue("path should still exist after restart", dfs.exists(dir));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (secondaryNn != null) {
      secondaryNn.shutdown();
    }
  }
}
InternalCallVerifier BooleanVerifier
/**
 * test
 * 1. create DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
 * 2. create a cluster and write a file
 * 3. corrupt/disable one storage (or two) by removing
 * 4. run doCheckpoint - it will fail on removed dirs (which
 * will invalidate the storages)
 * 5. write another file
 * 6. check that edits and fsimage differ
 * 7. run doCheckpoint
 * 8. verify that all the image and edits files are the same.
 */
@Test public void testStorageRestore() throws Exception {
int numDatanodes=0;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).manageNameDfsDirs(false).build();
cluster.waitActive();
SecondaryNameNode secondary=new SecondaryNameNode(config);
System.out.println("****testStorageRestore: Cluster and SNN started");
printStorages(cluster.getNameNode().getFSImage());
FileSystem fs=cluster.getFileSystem();
Path path=new Path("/","test");
assertTrue(fs.mkdirs(path));
System.out.println("****testStorageRestore: dir 'test' created, invalidating storage...");
// Knock out path2 and path3, leaving only path1 healthy.
invalidateStorage(cluster.getNameNode().getFSImage(),ImmutableSet.of(path2,path3));
printStorages(cluster.getNameNode().getFSImage());
System.out.println("****testStorageRestore: storage invalidated");
path=new Path("/","test1");
assertTrue(fs.mkdirs(path));
System.out.println("****testStorageRestore: dir 'test1' created");
// The edit written after invalidation reached only path1, so its in-progress
// log must differ (from byte offset 2) from the stale copies in path2/path3,
// while those two stale copies still match each other.
FSImageTestUtil.assertFileContentsDifferent(2,new File(path1,"current/" + getInProgressEditsFileName(1)),new File(path2,"current/" + getInProgressEditsFileName(1)),new File(path3,"current/" + getInProgressEditsFileName(1)));
FSImageTestUtil.assertFileContentsSame(new File(path2,"current/" + getInProgressEditsFileName(1)),new File(path3,"current/" + getInProgressEditsFileName(1)));
System.out.println("****testStorageRestore: checkfiles(false) run");
// Checkpoint restores the failed dirs: images go to image dirs only, and
// finalized logs exist only where the segment was healthy when finalized.
secondary.doCheckpoint();
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getImageFileName(4)),new File(path2,"current/" + getImageFileName(4)));
assertFalse("Should not have any image in an edits-only directory",new File(path3,"current/" + getImageFileName(4)).exists());
assertTrue("Should have finalized logs in the directory that didn't fail",new File(path1,"current/" + getFinalizedEditsFileName(1,4)).exists());
assertFalse("Should not have finalized logs in the failed directories",new File(path2,"current/" + getFinalizedEditsFileName(1,4)).exists());
assertFalse("Should not have finalized logs in the failed directories",new File(path3,"current/" + getFinalizedEditsFileName(1,4)).exists());
// After restore, the new segment (starting at txid 5) reaches all three dirs.
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getInProgressEditsFileName(5)),new File(path2,"current/" + getInProgressEditsFileName(5)),new File(path3,"current/" + getInProgressEditsFileName(5)));
String md5BeforeEdit=FSImageTestUtil.getFileMD5(new File(path1,"current/" + getInProgressEditsFileName(5)));
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getImageFileName(0)),new File(path2,"current/" + getImageFileName(0)));
path=new Path("/","test2");
assertTrue(fs.mkdirs(path));
String md5AfterEdit=FSImageTestUtil.getFileMD5(new File(path1,"current/" + getInProgressEditsFileName(5)));
// Sanity check: the mkdir above actually changed the in-progress log.
assertFalse(md5BeforeEdit.equals(md5AfterEdit));
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getInProgressEditsFileName(5)),new File(path2,"current/" + getInProgressEditsFileName(5)),new File(path3,"current/" + getInProgressEditsFileName(5)));
secondary.shutdown();
cluster.shutdown();
// Shutdown finalizes segment [5,7] identically in every directory.
FSImageTestUtil.assertFileContentsSame(new File(path1,"current/" + getFinalizedEditsFileName(5,7)),new File(path2,"current/" + getFinalizedEditsFileName(5,7)),new File(path3,"current/" + getFinalizedEditsFileName(5,7)));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * BootstrapStandby must fail with ERR_CODE_LOGS_UNAVAILABLE (and log a FATAL
 * message) when the shared edits dir is missing a finalized segment that
 * covers the transactions the standby needs.
 */
@Test public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();
  CheckpointSignature sig = nn0.getRpcServer().rollEditLog();
  assertEquals(3, sig.getCurSegmentTxId());
  // Remove the finalized segment [1,2] from the shared edits directory.
  File sharedEditsDir = new File(cluster.getSharedEditsDir(0, 1));
  File segmentFile = new File(new File(sharedEditsDir, "current"), NNStorage.getFinalizedEditsFileName(1, 2));
  GenericTestUtils.assertExists(segmentFile);
  assertTrue(segmentFile.delete());
  LogCapturer capture = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(BootstrapStandby.class));
  try {
    int exitCode = BootstrapStandby.run(new String[]{"-force"}, cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, exitCode);
  } finally {
    capture.stopCapturing();
  }
  GenericTestUtils.assertMatches(capture.getOutput(), "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Base success case: the primary NN hasn't made any checkpoints, and we
 * copy the fsimage_0 file over and start up.
 */
@Test public void testSuccessfulBaseCase() throws Exception {
  removeStandbyNameDirs();
  // With its name dirs gone the standby cannot come up on its own.
  try {
    cluster.restartNameNode(1);
    fail("Did not throw");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("storage directory does not exist or is not accessible", ioe);
  }
  final int exitCode = BootstrapStandby.run(new String[]{"-nonInteractive"}, cluster.getConfiguration(1));
  assertEquals(0, exitCode);
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
  // Now the standby starts cleanly.
  cluster.restartNameNode(1);
}
InternalCallVerifier EqualityVerifier
/**
 * Bootstrapping the standby must succeed even when the source NN is itself
 * in the standby state rather than active.
 */
@Test(timeout=30000) public void testOtherNodeNotActive() throws Exception {
  cluster.transitionToStandby(0);
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, cluster.getConfiguration(1));
  assertEquals(0, exitCode);
}
InternalCallVerifier EqualityVerifier
/**
 * With already-formatted standby dirs, -nonInteractive must refuse with
 * ERR_CODE_ALREADY_FORMATTED, while -force reformats and succeeds.
 */
@Test public void testStandbyDirsAlreadyExist() throws Exception {
  Configuration standbyConf = cluster.getConfiguration(1);
  int exitCode = BootstrapStandby.run(new String[]{"-nonInteractive"}, standbyConf);
  assertEquals(BootstrapStandby.ERR_CODE_ALREADY_FORMATTED, exitCode);
  exitCode = BootstrapStandby.run(new String[]{"-force"}, standbyConf);
  assertEquals(0, exitCode);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * BootstrapStandby should download the most recent checkpoint made by the
 * active, not merely the initial image.
 */
@Test public void testDownloadingLaterCheckpoint() throws Exception {
  // Roll twice, then force a checkpoint at a later txid via
  // safemode + saveNamespace.
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  final long checkpointTxId = NameNodeAdapter.getNamesystem(nn0).getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6, checkpointTxId);
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, cluster.getConfiguration(1));
  assertEquals(0, exitCode);
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of((int) checkpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
  cluster.restartNameNode(1);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * BootstrapStandby when the existing NN is standby
 */
@Test public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Put the source NN into standby before bootstrapping from it.
  cluster.transitionToStandby(0);
  Configuration standbyConf = cluster.getConfiguration(1);
  cluster.shutdownNameNode(1);
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, standbyConf);
  assertEquals(0, exitCode);
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * BootstrapStandby when the existing NN is active
 */
@Test public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Put the source NN into the active state before bootstrapping from it.
  cluster.transitionToActive(0);
  Configuration standbyConf = cluster.getConfiguration(1);
  cluster.shutdownNameNode(1);
  final int exitCode = BootstrapStandby.run(new String[]{"-force"}, standbyConf);
  assertEquals(0, exitCode);
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Make sure that starting a second NN with the -upgrade flag fails if the
 * other NN has already done that.
 */
@Test public void testCannotUpgradeSecondNameNode() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// Fresh HA cluster: no "previous" dirs anywhere yet.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Shut down NN1 and restart NN0 with -upgrade: only NN0 and the shared
// dir gain a "previous" directory.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// NN0 continues to operate normally after the upgrade restart.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Starting the second NN with -upgrade must now be rejected.
cluster.getNameNodeInfos()[1].setStartOpt(StartupOption.UPGRADE);
try {
cluster.restartNameNode(1,false);
fail("Should not have been able to start second NN with -upgrade");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("It looks like the shared log is already being upgraded",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Ensure that an admin cannot finalize an HA upgrade without at least one NN
 * being active.
 */
@Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// Fresh HA cluster: no "previous" dirs anywhere yet.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 while NN1 is down.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bring NN1 back via bootstrapStandby and fail over to it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
// With both NNs standby, finalize must be refused.
cluster.transitionToStandby(1);
try {
runFinalizeCommand(cluster);
fail("Should not have been able to finalize upgrade with no NN active");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe);
}
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that an HA NN with NFS-based HA can successfully start and
 * upgrade.
 */
@Test public void testNfsUpgrade() throws IOException, URISyntaxException {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// Fresh HA cluster: no "previous" dirs anywhere yet.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 while NN1 is down; only NN0 and the shared dir
// gain a "previous" directory.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bring NN1 back via bootstrapStandby, fail over to it, and verify
// the cluster still functions with matching ctimes.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Upgrade a QJM-based HA cluster, then finalize and verify that the
 * "previous" directories are removed from both NNs and JournalNodes.
 */
@Test public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Fresh cluster: no "previous" dirs on NNs or JNs yet.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 while NN1 is down; NN0 and the JNs gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
assertTrue(fs.mkdirs(new Path("/foo2")));
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Bring NN1 back, then finalize: all "previous" dirs must disappear.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes.
 */
@Test public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Fresh cluster: no "previous" dirs on NNs or JNs yet.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 while NN1 is down; NN0 and the JNs gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Restart NN0 normally and confirm it still serves writes.
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
assertTrue(fs.mkdirs(new Path("/foo3")));
// Bring NN1 back via bootstrapStandby and fail over to it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.mkdirs(new Path("/foo4")));
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rollback with NFS shared dir.
 */
@Test public void testRollbackWithNfs() throws Exception {
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
// Fresh HA cluster: no "previous" dirs anywhere yet.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
File sharedDir=new File(cluster.getSharedEditsDir(0,1));
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkPreviousDirExistence(sharedDir,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 while NN1 is down.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Bootstrapping NN1 does not create a "previous" dir for it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkPreviousDirExistence(sharedDir,true);
assertCTimesEqual(cluster);
// Shut down and roll back offline using NN0's name dirs; "previous"
// must be gone from NN0 and the shared dir afterwards.
Collection nn1NameDirs=cluster.getNameDirs(0);
cluster.shutdown();
conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
NameNode.doRollback(conf,false);
// NOTE(review): cluster is queried after shutdown() here; presumably
// this helper only inspects on-disk directories -- confirm.
checkNnPreviousDirExistence(cluster,0,false);
checkPreviousDirExistence(sharedDir,false);
}
finally {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rollback of a QJM-based HA upgrade: after NameNode.doRollback the
 * "previous" directories must be gone from both the NN and the JNs.
 */
@Test public void testRollbackWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Fresh cluster: no "previous" dirs on NNs or JNs yet.
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
checkJnPreviousDirExistence(qjCluster,false);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// Upgrade via NN0 while NN1 is down; NN0 and the JNs gain "previous".
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
assertTrue(fs.mkdirs(new Path("/foo2")));
// Bootstrapping NN1 does not create a "previous" dir for it.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
assertCTimesEqual(cluster);
// Shut down and roll back offline using NN0's name dirs.
Collection nn1NameDirs=cluster.getNameDirs(0);
cluster.shutdown();
conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
NameNode.doRollback(conf,false);
// NOTE(review): cluster/qjCluster are queried after shutdown(); presumably
// these helpers only inspect on-disk directories -- confirm.
checkNnPreviousDirExistence(cluster,0,false);
checkJnPreviousDirExistence(qjCluster,false);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that even if the NN which initiated the upgrade is in the standby
 * state that we're allowed to finalize.
 */
@Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
MiniQJMHACluster qjCluster=null;
FileSystem fs=null;
try {
Builder builder=new MiniQJMHACluster.Builder(conf);
builder.getDfsBuilder().numDataNodes(0);
qjCluster=builder.build();
MiniDFSCluster cluster=qjCluster.getDfsCluster();
// Fresh cluster: no "previous" dirs on NNs or JNs yet.
checkJnPreviousDirExistence(qjCluster,false);
checkClusterPreviousDirExistence(cluster,false);
assertCTimesEqual(cluster);
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/foo1")));
// NN0 initiates the upgrade while NN1 is down.
cluster.shutdownNameNode(1);
cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
cluster.restartNameNode(0,false);
checkNnPreviousDirExistence(cluster,0,true);
checkNnPreviousDirExistence(cluster,1,false);
checkJnPreviousDirExistence(qjCluster,true);
// Bring NN1 back, make it active (so the upgrading NN0 is standby),
// then finalize from NN1.
int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
assertEquals(0,rc);
cluster.restartNameNode(1);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
runFinalizeCommand(cluster);
checkClusterPreviousDirExistence(cluster,false);
checkJnPreviousDirExistence(qjCluster,false);
assertCTimesEqual(cluster);
}
finally {
if (fs != null) {
fs.close();
}
if (qjCluster != null) {
qjCluster.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Drives a manual failover nn1 -> nn2 and back via the DFSHAAdmin CLI,
 * checking both the tool's exit codes and the resulting HA states.
 */
@Test(timeout=30000) public void testManualFailoverWithDFSHAAdmin() throws Exception {
  DFSHAAdmin admin = new DFSHAAdmin();
  admin.setConf(conf);
  assertEquals(0, admin.run(new String[]{"-failover", "nn1", "nn2"}));
  waitForHAState(0, HAServiceState.STANDBY);
  waitForHAState(1, HAServiceState.ACTIVE);
  // And fail back again.
  assertEquals(0, admin.run(new String[]{"-failover", "nn2", "nn1"}));
  waitForHAState(0, HAServiceState.ACTIVE);
  waitForHAState(1, HAServiceState.STANDBY);
}
TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier
// Builds a 2-NN mini cluster with auto-failover enabled, formats ZK, and
// starts a ZKFC thread for each NN; waits until both report healthy.
@Before public void setup() throws Exception {
conf=new Configuration();
conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1",hostPort);
conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,AlwaysSucceedFencer.class.getName());
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true);
// Drop idle IPC connections immediately so failed-over NNs don't hold sockets.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1",10023);
conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",10024);
MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021)).addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
cluster.waitActive();
// Format ZK before starting the first ZKFC so it can win the election.
ctx=new TestContext();
ctx.addThread(thr1=new ZKFCThread(ctx,0));
assertEquals(0,thr1.zkfc.run(new String[]{"-formatZK"}));
thr1.start();
waitForHAState(0,HAServiceState.ACTIVE);
// Start the second ZKFC only after NN0 is active, so NN1 becomes standby.
ctx.addThread(thr2=new ZKFCThread(ctx,1));
thr2.start();
ZKFCTestUtil.waitForHealthState(thr1.zkfc,HealthMonitor.State.SERVICE_HEALTHY,ctx);
ZKFCTestUtil.waitForHealthState(thr2.zkfc,HealthMonitor.State.SERVICE_HEALTHY,ctx);
fs=HATestUtil.configureFailoverFs(cluster,conf);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that automatic failover is triggered by shutting the
 * active NN down.
 */
@Test(timeout=60000) public void testFailoverAndBackOnNNShutdown() throws Exception {
Path p1=new Path("/dir1");
Path p2=new Path("/dir2");
fs.mkdirs(p1);
// Kill the active NN0: the failover fs should transparently switch to NN1.
cluster.shutdownNameNode(0);
assertTrue(fs.exists(p1));
fs.mkdirs(p2);
// The fencer must have been invoked against NN0's (thr1's) local target.
assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),thr1.zkfc.getLocalTarget().getAddress());
// Bring NN0 back: it should rejoin as standby, with all data visible.
cluster.restartNameNode(0);
waitForHAState(0,HAServiceState.STANDBY);
assertTrue(fs.exists(p1));
assertTrue(fs.exists(p2));
// Now kill NN1: NN0 should take over, and NN1 should have been fenced.
cluster.shutdownNameNode(1);
waitForHAState(0,HAServiceState.ACTIVE);
assertTrue(fs.exists(p1));
assertTrue(fs.exists(p2));
assertEquals(AlwaysSucceedFencer.getLastFencedService().getAddress(),thr2.zkfc.getLocalTarget().getAddress());
}
InternalCallVerifier EqualityVerifier
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 * the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 * the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test public void testRBWReportArrivesAfterEdits() throws Exception {
final CountDownLatch brFinished=new CountDownLatch(1);
// DelayAnswer that additionally signals brFinished once the delayed
// block report has actually been delivered to the NN.
DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG){
@Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
try {
return super.passThrough(invocation);
}
finally {
brFinished.countDown();
}
}
}
;
FSDataOutputStream out=fs.create(TEST_FILE_PATH);
try {
AppendTestUtil.write(out,0,10);
// hflush so the block exists (in RBW state) on the DN while still open.
out.hflush();
DataNode dn=cluster.getDataNodes().get(0);
// Intercept the DN->standby (nn2) block report and hold it back.
DatanodeProtocolClientSideTranslatorPB spy=DataNodeTestUtils.spyOnBposToNN(dn,nn2);
Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.anyObject(),Mockito.anyString(),Mockito.anyObject());
dn.scheduleAllBlockReport(0);
delayer.waitForCall();
}
finally {
// Closing the file produces the OP_CLOSE edit before the BR is released.
IOUtils.closeStream(out);
}
// Fail over first, then release the stale RBW report to the new active.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
delayer.proceed();
brFinished.await();
// Neither NN should consider any replica corrupt after the late report.
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs,TEST_FILE_PATH);
}
InternalCallVerifier EqualityVerifier
/**
 * Test that, when a block is re-opened for append, the related
 * datanode messages are correctly queued by the SBN because
 * they have future states and genstamps.
 */
@Test public void testQueueingWithAppend() throws Exception {
// Running tally of datanode messages the standby is expected to queue;
// each write/close/BR event produces one message per datanode.
int numQueued=0;
int numDN=cluster.getDataNodes().size();
FSDataOutputStream out=fs.create(TEST_FILE_PATH);
try {
AppendTestUtil.write(out,0,10);
out.hflush();
// RBW replica reported per DN while the file is open.
numQueued+=numDN;
}
finally {
IOUtils.closeStream(out);
// FINALIZED replica reported per DN on close.
numQueued+=numDN;
}
cluster.triggerBlockReports();
numQueued+=numDN;
try {
// Re-open for append: bumps the genstamp, producing more future-state
// messages the standby must queue rather than apply.
out=fs.append(TEST_FILE_PATH);
AppendTestUtil.write(out,10,10);
numQueued+=numDN;
}
finally {
IOUtils.closeStream(out);
numQueued+=numDN;
}
cluster.triggerBlockReports();
numQueued+=numDN;
assertEquals(numQueued,cluster.getNameNode(1).getNamesystem().getPendingDataNodeMessageCount());
// After failover the queued messages are replayed; nothing should be
// marked corrupt and the full 20 bytes must be readable.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
AppendTestUtil.check(fs,TEST_FILE_PATH,20);
}
InternalCallVerifier EqualityVerifier
/**
 * Test case which restarts the standby node in such a way that,
 * when it exits safemode, it will want to invalidate a bunch
 * of over-replicated block replicas. Ensures that if we failover
 * at this point it won't lose data.
 */
@Test public void testNNClearsCommandsOnFailoverAfterStartup() throws Exception {
// 30 small blocks at replication 3.
DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)3,1L);
banner("Shutting down NN2");
cluster.shutdownNameNode(1);
banner("Setting replication to 1, rolling edit log.");
// Dropping replication to 1 makes 2/3 of the replicas over-replicated.
nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
nn1.getRpcServer().rollEditLog();
banner("Starting NN2 again.");
// NN2 starts fresh and will compute invalidations as it catches up.
cluster.restartNameNode(1);
nn2=cluster.getNameNode(1);
banner("triggering BRs");
cluster.triggerBlockReports();
banner("computing invalidation on nn1");
BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
banner("computing invalidation on nn2");
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
banner("Metadata immediately before failover");
doMetasave(nn2);
banner("Failing to NN2 but let NN1 continue to think it's active");
// Simulate a non-graceful failover: NN1 keeps believing it is active.
NameNodeAdapter.abortEditLogs(nn1);
NameNodeAdapter.enterSafeMode(nn1,false);
cluster.transitionToActive(1);
assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
banner("Metadata immediately after failover");
doMetasave(nn2);
banner("Triggering heartbeats and block reports so that fencing is completed");
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
banner("Metadata after nodes have all block-reported");
doMetasave(nn2);
// Once all DNs have reported, no blocks should remain postponed.
assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
HATestUtil.waitForNNToIssueDeletions(nn2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// All replication work drained; the file must still be fully readable.
assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
banner("Making sure the file is still readable");
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
}
InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 * the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 * this bug, it was mistakenly applying the RBW reported state
 * after the block had been completed, causing the block to get
 * marked corrupt. Instead, we should now be applying the RBW
 * message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test public void testBlockReportsWhileFileBeingWritten() throws Exception {
FSDataOutputStream out=fs.create(TEST_FILE_PATH);
try {
AppendTestUtil.write(out,0,10);
// hflush so the DN has an RBW replica, then BR while still open.
out.hflush();
cluster.triggerBlockReports();
}
finally {
IOUtils.closeStream(out);
}
// Fail over; the standby must replay the queued RBW message on OP_ADD
// and the FINALIZED message on OP_CLOSE without marking corruption.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
DFSTestUtil.readFile(fs,TEST_FILE_PATH);
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies DN fencing after a non-graceful failover: replica invalidations
 * queued by the old active must be cleared by the new active, postponed
 * misreplicated blocks must drain once all DNs re-report, and no data may
 * be lost.
 */
@Test public void testDnFencing() throws Exception {
// 30 small blocks at replication 3; dropping to 1 queues invalidations.
DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)3,1L);
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,TEST_FILE_PATH);
nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
cluster.triggerHeartbeats();
banner("Failing to NN2 but let NN1 continue to think it's active");
// Non-graceful failover: NN1 is not told it lost the active role.
NameNodeAdapter.abortEditLogs(nn1);
NameNodeAdapter.enterSafeMode(nn1,false);
cluster.transitionToActive(1);
assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
banner("NN2 Metadata immediately after failover");
doMetasave(nn2);
// All 30 blocks are postponed until each DN block-reports to NN2.
assertEquals(30,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
banner("Triggering heartbeats and block reports so that fencing is completed");
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
banner("Metadata after nodes have all block-reported");
doMetasave(nn2);
assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
banner("Making sure the file is still readable");
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
banner("Waiting for the actual block files to get deleted from DNs.");
// Down to a single surviving on-disk replica after invalidations run.
waitForTrueReplication(cluster,block,1);
}
InternalCallVerifier EqualityVerifier
/**
 * Test case that reduces replication of a file with a lot of blocks
 * and then fails over right after those blocks enter the DN invalidation
 * queues on the active. Ensures that fencing is correct and no replicas
 * are lost.
 */
@Test public void testNNClearsCommandsOnFailoverWithReplChanges() throws Exception {
DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)1,1L);
banner("rolling NN1's edit log, forcing catch-up");
HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
// Raise replication first so the later reduction has replicas to delete.
nn1.getRpcServer().setReplication(TEST_FILE,(short)2);
// Spin until all replication work has been handed out...
while (BlockManagerTestUtil.getComputedDatanodeWork(nn1.getNamesystem().getBlockManager()) > 0) {
LOG.info("Getting more replication work computed");
}
BlockManager bm1=nn1.getNamesystem().getBlockManager();
// ...and until the pending replications have actually completed.
while (bm1.getPendingReplicationBlocksCount() > 0) {
BlockManagerTestUtil.updateState(bm1);
cluster.triggerHeartbeats();
Thread.sleep(1000);
}
banner("triggering BRs");
cluster.triggerBlockReports();
// Now reduce replication; both NNs compute invalidations independently.
nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
banner("computing invalidation on nn1");
BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
doMetasave(nn1);
banner("computing invalidation on nn2");
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
doMetasave(nn2);
banner("Metadata immediately before failover");
doMetasave(nn2);
banner("Failing to NN2 but let NN1 continue to think it's active");
// Non-graceful failover: NN1 keeps believing it is the active node.
NameNodeAdapter.abortEditLogs(nn1);
NameNodeAdapter.enterSafeMode(nn1,false);
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
cluster.transitionToActive(1);
assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
banner("Metadata immediately after failover");
doMetasave(nn2);
banner("Triggering heartbeats and block reports so that fencing is completed");
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
banner("Metadata after nodes have all block-reported");
doMetasave(nn2);
// Fencing complete: nothing postponed, deletions issued and acted on.
assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
HATestUtil.waitForNNToIssueDeletions(nn2);
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
banner("Making sure the file is still readable");
FileSystem fs2=cluster.getFileSystem(1);
DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
* Test if StandbyException can be thrown from StandbyNN, when it's requested for
* password. (HDFS-6475). With StandbyException, the client can failover to try
* activeNN.
*/
@Test public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
final DelegationTokenSecretManager stSecretManager=NameNodeAdapter.getDtSecretManager(nn1.getNamesystem());
final Token token=getDelegationToken(fs,"JobTracker");
final DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
assertTrue(null != stSecretManager.retrievePassword(identifier));
final UserGroupInformation ugi=UserGroupInformation.createRemoteUser("JobTracker");
ugi.addToken(token);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Object run(){
try {
try {
byte[] tmppw=dtSecretManager.retrievePassword(identifier);
fail("InvalidToken with cause StandbyException is expected" + " since nn0 is standby");
return tmppw;
}
catch ( IOException e) {
throw new SecurityException("Failed to obtain user group information: " + e,e);
}
}
catch ( Exception oe) {
HttpServletResponse response=mock(HttpServletResponse.class);
ExceptionHandler eh=new ExceptionHandler();
eh.initResponse(response);
Response resp=eh.toResponse(oe);
Map,?> m=(Map,?>)JSON.parse(resp.getEntity().toString());
RemoteException re=JsonUtil.toRemoteException(m);
Exception unwrapped=((RemoteException)re).unwrapRemoteException(StandbyException.class);
assertTrue(unwrapped instanceof StandbyException);
return null;
}
}
}
);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test if correct exception (StandbyException or RetriableException) can be
 * thrown during the NN failover.
 */
@Test public void testDelegationTokenDuringNNFailover() throws Exception {
EditLogTailer editLogTailer=nn1.getNamesystem().getEditLogTailer();
// Replace the real tailer with a test tailer that can be held back, so
// the failover below can be frozen in the "transitioning" state.
editLogTailer.stop();
Configuration conf=(Configuration)Whitebox.getInternalState(editLogTailer,"conf");
nn1.getNamesystem().setEditLogTailerForTests(new EditLogTailerForTest(nn1.getNamesystem(),conf));
final Token<DelegationTokenIdentifier> token=getDelegationToken(fs,"JobTracker");
DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
LOG.info("A valid token should have non-null password, " + "and should be renewed successfully");
assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken(token,"JobTracker");
// Once nn0 is standby, token operations against it must raise
// StandbyException mentioning the STANDBY state.
cluster.transitionToStandby(0);
try {
cluster.getNameNodeRpc(0).renewDelegationToken(token);
fail("StandbyException is expected since nn0 is in standby state");
}
catch ( StandbyException e) {
GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(),e);
}
// Start the nn1 transition on a background thread; the blocked test
// tailer keeps nn1 in transition while we verify below.
new Thread(){
@Override public void run(){
try {
cluster.transitionToActive(1);
}
catch ( Exception e) {
LOG.error("Transition nn1 to active failed",e);
}
}
}
.start();
Thread.sleep(1000);
try {
// While transitioning, verifyToken must fail retriably, not hang.
nn1.getNamesystem().verifyToken(token.decodeIdentifier(),token.getPassword());
fail("RetriableException/StandbyException is expected since nn1 is in transition");
}
catch ( IOException e) {
assertTrue(e instanceof StandbyException || e instanceof RetriableException);
LOG.info("Got expected exception",e);
}
// Release the held-back tailer so the transition can finish.
catchup=true;
synchronized (this) {
this.notifyAll();
}
// After nn1 is fully active, renew and cancel must both succeed.
Configuration clientConf=dfs.getConf();
doRenewOrCancel(token,clientConf,TokenTestAction.RENEW);
doRenewOrCancel(token,clientConf,TokenTestAction.CANCEL);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
 * exception if the URI is a logical URI. This bug fails the combination of
 * ha + mapred + security.
 */
@Test public void testDFSGetCanonicalServiceName() throws Exception {
URI hAUri=HATestUtil.getLogicalUri(cluster);
// The canonical service name must be the logical (HA) token service,
// not a physical NN address.
String haService=HAUtil.buildTokenServiceForLogicalUri(hAUri,HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService,dfs.getCanonicalServiceName());
final String renewer=UserGroupInformation.getCurrentUser().getShortUserName();
final Token<DelegationTokenIdentifier> token=getDelegationToken(dfs,renewer);
assertEquals(haService,token.getService().toString());
// Renew and cancel must work through the logical-URI configuration.
token.renew(dfs.getConf());
token.cancel(dfs.getConf());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Same as testDFSGetCanonicalServiceName but through the AbstractFileSystem
 * (FileContext) API: the canonical service name of a logical HA URI must be
 * the logical token service, and tokens issued under it must be renewable
 * and cancelable.
 */
@Test public void testHdfsGetCanonicalServiceName() throws Exception {
Configuration conf=dfs.getConf();
URI haUri=HATestUtil.getLogicalUri(cluster);
AbstractFileSystem afs=AbstractFileSystem.createFileSystem(haUri,conf);
String haService=HAUtil.buildTokenServiceForLogicalUri(haUri,HdfsConstants.HDFS_URI_SCHEME).toString();
assertEquals(haService,afs.getCanonicalServiceName());
Token<?> token=afs.getDelegationTokens(UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
assertEquals(haService,token.getService().toString());
// Renew and cancel must work through the logical-URI configuration.
token.renew(conf);
token.cancel(conf);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercises the delegation-token DFS API against an HA pair: a freshly
 * issued token must have a retrievable password and be renewable; renewal
 * with a configuration that cannot resolve the logical nameservice must
 * fail; and renew/cancel must keep working after a failover.
 */
@Test public void testDelegationTokenDFSApi() throws Exception {
final Token<DelegationTokenIdentifier> token=getDelegationToken(fs,"JobTracker");
DelegationTokenIdentifier identifier=new DelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
LOG.info("A valid token should have non-null password, " + "and should be renewed successfully");
assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken(token,"JobTracker");
Configuration clientConf=dfs.getConf();
doRenewOrCancel(token,clientConf,TokenTestAction.RENEW);
// An empty conf has no mapping for the logical URI, so renewal must fail
// with a descriptive error rather than silently doing the wrong thing.
Configuration emptyConf=new Configuration();
try {
doRenewOrCancel(token,emptyConf,TokenTestAction.RENEW);
fail("Did not throw trying to renew with an empty conf!");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Unable to map logical nameservice URI",ioe);
}
// The token must survive a failover: renew and cancel against the new
// active through the same client configuration.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
doRenewOrCancel(token,clientConf,TokenTestAction.RENEW);
doRenewOrCancel(token,clientConf,TokenTestAction.CANCEL);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies HAUtil.cloneDelegationTokenForLogicalUri: a token whose service
 * is the logical HA URI is cloned into one IPC-service token per physical
 * NN address, each carrying the original identifier and password, and the
 * clones track changes to the token-service naming mode (IP vs hostname).
 */
@Test public void testHAUtilClonesDelegationTokens() throws Exception {
final Token<DelegationTokenIdentifier> token=getDelegationToken(fs,"JobTracker");
UserGroupInformation ugi=UserGroupInformation.createRemoteUser("test");
URI haUri=new URI("hdfs://my-ha-uri/");
// Re-key the token under the logical URI before handing it to the UGI.
token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,HdfsConstants.HDFS_URI_SCHEME));
ugi.addToken(token);
// The two physical addresses behind the logical URI.
Collection<InetSocketAddress> nnAddrs=new HashSet<InetSocketAddress>();
nnAddrs.add(new InetSocketAddress("localhost",nn0.getNameNodeAddress().getPort()));
nnAddrs.add(new InetSocketAddress("localhost",nn1.getNameNodeAddress().getPort()));
HAUtil.cloneDelegationTokenForLogicalUri(ugi,haUri,nnAddrs);
// Expect the original logical token plus one clone per NN address.
Collection<Token<? extends TokenIdentifier>> tokens=ugi.getTokens();
assertEquals(3,tokens.size());
LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
DelegationTokenSelector dts=new DelegationTokenSelector();
// Each clone must be selectable by its IPC service and carry the same
// identifier and password as the original.
for ( InetSocketAddress addr : nnAddrs) {
Text ipcDtService=SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2=dts.selectToken(ipcDtService,ugi.getTokens());
assertNotNull(token2);
assertArrayEquals(token.getIdentifier(),token2.getIdentifier());
assertArrayEquals(token.getPassword(),token2.getPassword());
}
// After switching to hostname-based token services, the IP-keyed clones
// no longer match...
SecurityUtilTestHelper.setTokenServiceUseIp(false);
for ( InetSocketAddress addr : nnAddrs) {
Text ipcDtService=SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2=dts.selectToken(ipcDtService,ugi.getTokens());
assertNull(token2);
}
// ...until the token is re-cloned under the new service naming.
HAUtil.cloneDelegationTokenForLogicalUri(ugi,haUri,nnAddrs);
for ( InetSocketAddress addr : nnAddrs) {
Text ipcDtService=SecurityUtil.buildTokenService(addr);
Token<DelegationTokenIdentifier> token2=dts.selectToken(ipcDtService,ugi.getTokens());
assertNotNull(token2);
assertArrayEquals(token.getIdentifier(),token2.getIdentifier());
assertArrayEquals(token.getPassword(),token2.getPassword());
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that the standby's edit log tailer picks up namespace changes:
 * directories created on the active become visible (as dirs) on the
 * standby after waitForStandbyToCatchUp, in two successive batches.
 */
@Test public void testTailer() throws IOException, InterruptedException, ServiceFailedException {
Configuration conf=new HdfsConfiguration();
// Tail edits aggressively (1s) and allow reads on the standby so the
// standby's view can be asserted directly.
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
HAUtil.setAllowStandbyReads(conf,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
try {
// First half of the directories, created on the active...
for (int i=0; i < DIRS_TO_MAKE / 2; i++) {
NameNodeAdapter.mkdirs(nn1,getDirPath(i),new PermissionStatus("test","test",new FsPermission((short)00755)),true);
}
HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
// ...must be visible on the standby once it has caught up.
for (int i=0; i < DIRS_TO_MAKE / 2; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,getDirPath(i),false).isDir());
}
// Second half: same round trip again.
for (int i=DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
NameNodeAdapter.mkdirs(nn1,getDirPath(i),new PermissionStatus("test","test",new FsPermission((short)00755)),true);
}
HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
for (int i=DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
assertTrue(NameNodeAdapter.getFileInfo(nn2,getDirPath(i),false).isDir());
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier NullVerifier
/**
 * Checks edit-file layout and namespace visibility across HA startup:
 * before any NN is active no edit files exist; once NN0 is active, only
 * NN0's dirs and the shared dir hold an in-progress segment; a restarted
 * standby does not see un-tailed edits until it becomes active.
 */
@Test public void testStartup() throws Exception {
Configuration conf=new Configuration();
HAUtil.setAllowStandbyReads(conf,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
// Neither NN is active yet: no edit files anywhere, including shared.
List allDirs=Lists.newArrayList();
allDirs.addAll(cluster.getNameDirs(0));
allDirs.addAll(cluster.getNameDirs(1));
allDirs.add(cluster.getSharedEditsDir(0,1));
assertNoEditFiles(allDirs);
cluster.transitionToActive(0);
// Active NN0 opens in-progress segment 1 locally and in the shared dir;
// the standby's local dirs stay empty.
assertEditFiles(cluster.getNameDirs(0),NNStorage.getInProgressEditsFileName(1));
assertEditFiles(Collections.singletonList(cluster.getSharedEditsDir(0,1)),NNStorage.getInProgressEditsFileName(1));
assertNoEditFiles(cluster.getNameDirs(1));
cluster.getNameNode(0).getRpcServer().mkdirs("/test",FsPermission.createImmutable((short)0755),true);
// Restarting the standby must not change the layout, and the standby
// must not yet see /test (the in-progress segment is not tailed).
cluster.restartNameNode(1);
assertEditFiles(cluster.getNameDirs(0),NNStorage.getInProgressEditsFileName(1));
assertEditFiles(Collections.singletonList(cluster.getSharedEditsDir(0,1)),NNStorage.getInProgressEditsFileName(1));
assertNoEditFiles(cluster.getNameDirs(1));
assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),"/test",true));
cluster.getNameNode(0).getRpcServer().mkdirs("/test2",FsPermission.createImmutable((short)0755),true);
// After NN0 restarts and NN1 becomes active, NN1 must have replayed
// both edits from the shared dir.
cluster.restartNameNode(0);
cluster.transitionToActive(1);
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),"/test",true));
assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),"/test2",true));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * The two NNs' block-token secret managers must never produce the same
 * effective serial number, even when seeded with identical raw values
 * across the integer range (0, extremes, and mid-range values).
 */
@Test public void ensureSerialNumbersNeverOverlap(){
  BlockTokenSecretManager btsm1=cluster.getNamesystem(0).getBlockManager().getBlockTokenSecretManager();
  BlockTokenSecretManager btsm2=cluster.getNamesystem(1).getBlockManager().getBlockTokenSecretManager();
  // Seed both managers with the same raw serial and verify the effective
  // serial numbers still differ.
  int[] seeds={0,Integer.MAX_VALUE,Integer.MIN_VALUE,Integer.MAX_VALUE / 2,Integer.MIN_VALUE / 2};
  for (int seed : seeds) {
    btsm1.setSerialNo(seed);
    btsm2.setSerialNo(seed);
    assertFalse(btsm1.getSerialNoForTesting() == btsm2.getSerialNoForTesting());
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Reads with valid block tokens succeed; reads with tampered (expired)
 * block tokens must fail with "Could not obtain block". A spied DFSClient
 * rewrites every located block's token to one that expires almost
 * immediately before handing it to the reader.
 */
@Test public void ensureInvalidBlockTokensAreRejected() throws IOException, URISyntaxException {
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
DFSTestUtil.writeFile(fs,TEST_PATH,TEST_DATA);
// Baseline: the file is readable with legitimate tokens.
assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH));
DFSClient dfsClient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
DFSClient spyDfsClient=Mockito.spy(dfsClient);
// Intercept getLocatedBlocks and replace each block token with one whose
// expiry is ~now, so the DN-side check rejects it.
Mockito.doAnswer(new Answer(){
@Override public LocatedBlocks answer( InvocationOnMock arg0) throws Throwable {
LocatedBlocks locatedBlocks=(LocatedBlocks)arg0.callRealMethod();
for ( LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
Token token=lb.getBlockToken();
BlockTokenIdentifier id=lb.getBlockToken().decodeIdentifier();
// 10 ms in the future: effectively expired by the time it is used.
id.setExpiryDate(Time.now() + 10);
Token newToken=new Token(id.getBytes(),token.getPassword(),token.getKind(),token.getService());
lb.setBlockToken(newToken);
}
return locatedBlocks;
}
}
).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),Mockito.anyLong(),Mockito.anyLong());
// Swap the spy into the filesystem so the next read uses it.
DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyDfsClient);
try {
assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH));
fail("Shouldn't have been able to read a file with invalid block tokens");
}
catch ( IOException ioe) {
GenericTestUtils.assertExceptionContains("Could not obtain block",ioe);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test that marking the shared edits dir as being "required" causes the NN to
 * fail if that dir can't be accessed. The shared dir is made unwritable via
 * chmod; the standby must NOT enter safemode, but the active must abort when
 * it tries to finalize a log segment in the required (shared) journal.
 */
@Test public void testFailureOfSharedDir() throws Exception {
Configuration conf=new Configuration();
// Short resource-check interval so the NN notices the dir loss quickly.
conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000);
MiniDFSCluster cluster=null;
File sharedEditsDir=null;
try {
// checkExitOnShutdown(false): we expect the NN to call System.exit
// (trapped as ExitException) when the required journal fails.
cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
cluster.waitActive();
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
assertTrue(fs.mkdirs(new Path("/test1")));
URI sharedEditsUri=cluster.getSharedEditsDir(0,1);
sharedEditsDir=new File(sharedEditsUri);
// Remove write permission recursively to simulate a dead shared dir.
assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true));
// Wait two resource-check cycles so both NNs observe the failure.
Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
NameNode nn1=cluster.getNameNode(1);
assertTrue(nn1.isStandbyState());
assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode());
NameNode nn0=cluster.getNameNode(0);
try {
// Rolling the log finalizes the current segment, which must fail
// because the shared (required) journal is unwritable.
nn0.getRpcServer().rollEditLog();
fail("Succeeded in rolling edit log despite shared dir being deleted");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee);
}
// The local (non-shared) edits dirs should still hold only the
// original in-progress segment.
for ( URI editsUri : cluster.getNameEditsDirs(0)) {
if (editsUri.equals(sharedEditsUri)) {
continue;
}
File editsDir=new File(editsUri.getPath());
File curDir=new File(editsDir,"current");
GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1));
}
}
finally {
// Restore permissions so the test dir can be cleaned up.
if (sharedEditsDir != null) {
FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test that the shared edits dir is automatically added to the list of edits
 * dirs that are marked required.
 */
@Test public void testSharedDirIsAutomaticallyMarkedRequired() throws URISyntaxException {
  URI explicitlyRequired=new URI("file:/foo");
  URI sharedDir=new URI("file:/bar");
  Configuration conf=new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,Joiner.on(",").join(explicitlyRequired,sharedDir));
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,explicitlyRequired.toString());
  // Before being marked shared, the second dir is not required.
  assertFalse(FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(sharedDir));
  // Marking it shared must implicitly make it required as well.
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,sharedDir.toString());
  Collection requiredEditsDirs=FSNamesystem.getRequiredNamespaceEditsDirs(conf);
  assertTrue(Joiner.on(",").join(requiredEditsDirs) + " does not contain " + sharedDir,requiredEditsDirs.contains(sharedDir));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure that the shared edits dirs are listed before non-shared dirs
 * when the configuration is parsed. This ensures that the shared journals
 * are synced before the local ones.
 */
@Test public void testSharedDirsComeFirstInEditsList() throws Exception {
  Configuration conf=new Configuration();
  URI shared=new URI("file:///shared-A");
  URI firstLocal=new URI("file:///local-A");
  URI secondLocal=new URI("file:///local-B");
  URI thirdLocal=new URI("file:///local-C");
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,shared.toString());
  // Local dirs are configured in C, B, A order on purpose.
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,Joiner.on(",").join(thirdLocal,secondLocal,firstLocal));
  List dirs=FSNamesystem.getNamespaceEditsDirs(conf);
  // Expected: shared dir first, then the locals in configured order.
  String expectedOrder=Joiner.on(",").join(shared,thirdLocal,secondLocal,firstLocal);
  assertEquals("Shared dirs should come first, then local dirs, in the order " + "they were listed in the configuration.",expectedOrder,Joiner.on(",").join(dirs));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Ensure that the standby fails to become active if it cannot read all
 * available edits in the shared edits dir when it is transitioning to active
 * state.
 */
@Test public void testFailureToReadEditsOnTransitionToActive() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
// From here on, the standby's edit-log reads fail partway through.
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
// expected: the injected read failure prevents catch-up
}
// With nn0 down, nn1 must refuse to go active because it cannot replay
// all available edits; the NN aborts via ExitException.
cluster.shutdownNameNode(0);
try {
cluster.transitionToActive(1);
fail("Standby transitioned to active, but should not have been able to");
}
catch ( ExitException ee) {
GenericTestUtils.assertExceptionContains("Error replaying edit log",ee);
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that the standby NN won't double-replay earlier edits if it encounters
 * a failure to read a later edit. After the read failure is cleared, the
 * standby must converge to the final namespace (DIR1 deleted, DIR2 and DIR3
 * present) without re-applying already-applied operations.
 */
@Test public void testFailuretoReadEdits() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Ops after this point are the ones the failing reader will truncate.
fs.setOwner(new Path(TEST_DIR1),"foo","bar");
assertTrue(fs.delete(new Path(TEST_DIR1),true));
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
// Limit how many ops the standby can read before an injected failure.
LimitedEditLogAnswer answer=causeFailureOnEditLogRead();
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
// expected: read failure blocks full catch-up
}
// Partial state: the delete of DIR1 and mkdir of DIR2 were applied
// before the failure; DIR3's mkdir was not.
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false));
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir());
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false));
// Clear the injected failure and let the standby finish catching up.
answer.setThrowExceptionOnRead(false);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
assertNull(NameNodeAdapter.getFileInfo(nn1,TEST_DIR1,false));
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR2,false).isDir());
assertTrue(NameNodeAdapter.getFileInfo(nn1,TEST_DIR3,false).isDir());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the following case:
 * 1. SBN is reading a finalized edits file when NFS disappears halfway
 * through (or some intermittent error happens)
 * 2. SBN performs a checkpoint and uploads it to the NN
 * 3. NN receives a checkpoint that doesn't correspond to the end of any log
 * segment
 * 4. Both NN and SBN should be able to restart at this point.
 * This is a regression test for HDFS-2766.
 */
@Test public void testCheckpointStartingMidEditsFile() throws Exception {
assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Both NNs checkpoint at txid 3 before the failure is injected.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3));
causeFailureOnEditLogRead();
assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
try {
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
fail("Standby fully caught up, but should not have been able to");
}
catch ( HATestUtil.CouldNotCatchUpException e) {
// expected: injected edit-log read failure
}
// The SBN still checkpoints at txid 5 -- mid-segment -- and uploads it.
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(0,3,5));
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// The NN must restart cleanly from a mid-segment checkpoint...
cluster.restartNameNode(0);
HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(0,3,5));
// ...and still expose the complete namespace afterwards.
FileSystem fs0=null;
try {
fs0=FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),conf);
assertTrue(fs0.exists(new Path(TEST_DIR1)));
assertTrue(fs0.exists(new Path(TEST_DIR2)));
assertTrue(fs0.exists(new Path(TEST_DIR3)));
}
finally {
if (fs0 != null) fs0.close();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test to verify the processing of PendingDataNodeMessageQueue in case of
 * append. One block will marked as corrupt if the OP_ADD, OP_UPDATE_BLOCKS
 * comes in one edit log segment and OP_CLOSE edit comes in next log segment
 * which is loaded during failover. Regression test for HDFS-3605.
 */
@Test public void testMultipleAppendsDuringCatchupTailing() throws Exception {
Configuration conf=new Configuration();
// Slow tailing (5s) and disabled automatic log rolling so the test
// controls exactly which ops land in which segment.
conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,"5000");
conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,-1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
FileSystem fs=null;
try {
cluster.transitionToActive(0);
fs=HATestUtil.configureFailoverFs(cluster,conf);
Path fileToAppend=new Path("/FileToAppend");
// OP_ADD + OP_UPDATE_BLOCKS go into the first segment...
FSDataOutputStream out=fs.create(fileToAppend);
out.writeBytes("/data");
out.hflush();
// ...which is rolled and tailed by the standby before the close.
cluster.getNameNode(0).getRpcServer().rollEditLog();
cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
// OP_CLOSE (and subsequent appends) land in the next segment, which
// is only loaded during failover.
out.close();
for (int i=0; i < 5; i++) {
DFSTestUtil.appendFile(fs,fileToAppend,"data");
}
cluster.triggerBlockReports();
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// fsck must be clean and no replicas may be marked corrupt.
int rc=ToolRunner.run(new DFSck(cluster.getConfiguration(1)),new String[]{"/","-files","-blocks"});
assertEquals(0,rc);
assertEquals("CorruptBlocks should be empty.",0,cluster.getNameNode(1).getNamesystem().getCorruptReplicaBlocks());
}
finally {
// NOTE(review): cluster is assigned unconditionally above, so this
// null check is purely defensive.
if (null != cluster) {
cluster.shutdown();
}
if (null != fs) {
fs.close();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Tests that the namenode edits dirs and shared edits dirs are gotten with
 * duplicates removed
 */
@Test public void testHAUniqueEditDirs() throws IOException {
  Configuration conf=new Configuration();
  // The shared dir is deliberately listed in BOTH keys; the union returned
  // below must be de-duplicated, leaving only two distinct URIs.
  conf.set(DFS_NAMENODE_EDITS_DIR_KEY,"file://edits/dir, " + "file://edits/shared/dir");
  conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,"file://edits/shared/dir");
  // Typed collection (was a raw Collection): getNamespaceEditsDirs returns URIs.
  Collection<URI> editsDirs=FSNamesystem.getNamespaceEditsDirs(conf);
  assertEquals(2,editsDirs.size());
}
InternalCallVerifier EqualityVerifier
/**
 * The standby checkpointer must resolve the OTHER namenode of the
 * nameservice as the target for checkpoint uploads.
 */
@Test public void testGetOtherNNHttpAddress() throws IOException {
  // HA config with two NNs; this node is configured as nn1 (1.2.3.1).
  Configuration conf=getHAConf("ns1","1.2.3.1","1.2.3.2");
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID,"ns1");
  NameNode.initializeGenericKeys(conf,"ns1","nn1");
  StandbyCheckpointer standbyCheckpointer=new StandbyCheckpointer(conf,fsn);
  // The peer NN (1.2.3.2) on the default HTTP port is the expected target.
  URL expectedActiveAddr=new URL("http","1.2.3.2",DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,"");
  assertEquals(expectedActiveAddr,standbyCheckpointer.getActiveNNAddress());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Sanity test of the HA-related FSNamesystem metrics — the HA state string,
 * the time since edits were last loaded, and the pending datanode message
 * count — across standby/active transitions of both namenodes.
 */
@Test public void testHAMetrics() throws Exception {
  Configuration conf=new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,Integer.MAX_VALUE);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
  FileSystem fs=null;
  try {
    cluster.waitActive();
    FSNamesystem nn0=cluster.getNamesystem(0);
    FSNamesystem nn1=cluster.getNamesystem(1);
    // Both NNs boot as standby. (Fixed: expected value goes first in
    // assertEquals; the original had the arguments reversed here.)
    assertEquals("standby",nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby",nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    cluster.transitionToActive(0);
    // The active NN always reports 0 ms since the last edits load.
    assertEquals("active",nn0.getHAState());
    assertEquals(0,nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby",nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertEquals("standby",nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("active",nn1.getHAState());
    assertEquals(0,nn1.getMillisSinceLastLoadedEdits());
    // After sleeping, the standby's counter must have advanced at least that long.
    Thread.sleep(2000);
    assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());
    assertEquals(0,nn0.getPendingDataNodeMessageCount());
    assertEquals(0,nn1.getPendingDataNodeMessageCount());
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    DFSTestUtil.createFile(fs,new Path("/foo"),10,(short)1,1L);
    // Block reports for the new file queue up on the standby (nn0) until it
    // tails the corresponding edits from the active.
    assertTrue(0 < nn0.getPendingDataNodeMessageCount());
    assertEquals(0,nn1.getPendingDataNodeMessageCount());
    long millisSinceLastLoadedEdits=nn0.getMillisSinceLastLoadedEdits();
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1),cluster.getNameNode(0));
    assertEquals(0,nn0.getPendingDataNodeMessageCount());
    assertEquals(0,nn1.getPendingDataNodeMessageCount());
    // Catching up reloads edits, so the counter must have reset to a smaller value.
    long newMillisSinceLastLoadedEdits=nn0.getMillisSinceLastLoadedEdits();
    assertTrue("expected " + millisSinceLastLoadedEdits + " > "+ newMillisSinceLastLoadedEdits,millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
  }
  finally {
    IOUtils.cleanup(LOG,fs);
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Similar to {@link #testBlocksRemovedWhileInSafeModeEditsArriveFirst()} except that
 * the OP_DELETE edits arrive at the SBN before the block deletion reports.
 * The tracking of safe blocks needs to properly account for the removal
 * of the blocks as well as the safe count. This is a regression test for
 * HDFS-2742.
 */
@Test public void testBlocksRemovedWhileInSafeModeEditsArriveFirst() throws Exception {
banner("Starting with NN0 active and NN1 standby, creating some blocks");
DFSTestUtil.createFile(fs,new Path("/test"),10 * BLOCK_SIZE,(short)3,1L);
// Roll so the restarted standby sees the creates and enters startup safemode.
nn0.getRpcServer().rollEditLog();
banner("Restarting standby");
restartStandby();
String status=nn1.getNamesystem().getSafemode();
assertTrue("Bad safemode status: '" + status + "'",status.startsWith("Safe mode is ON. The reported blocks 10 has reached the threshold " + "0.9990 of total blocks 10. The number of live datanodes 3 has " + "reached the minimum number 0. In safe mode extension. "+ "Safe mode will be turned off automatically"));
banner("Removing the blocks without rolling the edit log");
fs.delete(new Path("/test"),true);
// The OP_DELETE edits reach the SBN first (before any DN deletion reports).
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
assertSafeMode(nn1,0,0,3,0);
banner("Triggering sending deletions to DNs and Deletion Reports");
BlockManagerTestUtil.computeAllPendingWork(nn0.getNamesystem().getBlockManager());
cluster.triggerHeartbeats();
HATestUtil.waitForDNDeletions(cluster);
cluster.triggerDeletionReports();
// The late-arriving deletion reports must not perturb the safe-block counts.
assertSafeMode(nn1,0,0,3,0);
}
InternalCallVerifier BooleanVerifier
/**
 * Test case for enter safemode in standby namenode, when it is already in startup safemode.
 * It is a regression test for HDFS-2747.
 */
@Test public void testEnterSafeModeInSBNShouldNotThrowNPE() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs,new Path("/test"),3 * BLOCK_SIZE,(short)3,1L);
  nn0.getRpcServer().rollEditLog();
  banner("Creating some blocks that won't be in the edit log");
  DFSTestUtil.createFile(fs,new Path("/test2"),5 * BLOCK_SIZE,(short)3,1L);
  banner("Deleting the original blocks");
  fs.delete(new Path("/test"),true);
  banner("Restarting standby");
  restartStandby();
  FSNamesystem sbnNamesystem=nn1.getNamesystem();
  String safemodeStatus=sbnNamesystem.getSafemode();
  assertTrue("Bad safemode status: '" + safemodeStatus + "'",safemodeStatus.startsWith("Safe mode is ON."));
  // Entering safemode while already in startup safemode must be idempotent
  // and must not throw an NPE (HDFS-2747); exercise it twice.
  for (int attempt=0; attempt < 2; attempt++) {
    NameNodeAdapter.enterSafeMode(nn1,false);
    assertTrue("Failed to enter into safemode in standby",sbnNamesystem.isInSafeMode());
  }
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * DFS#isInSafeMode should check the ActiveNNs safemode in HA enabled cluster. HDFS-3507
 * @throws Exception
 */
@Test public void testIsInSafemode() throws Exception {
  NameNode nn2=cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state",nn2.isStandbyState());
  // Talk to the standby directly (bypassing the failover proxy):
  // isInSafeMode must be rejected with a StandbyException.
  InetSocketAddress standbyAddr=nn2.getNameNodeAddress();
  Configuration clientConf=new Configuration();
  DistributedFileSystem standbyDfs=new DistributedFileSystem();
  try {
    standbyDfs.initialize(URI.create("hdfs://" + standbyAddr.getHostName() + ":"+ standbyAddr.getPort()),clientConf);
    standbyDfs.isInSafeMode();
    fail("StandBy should throw exception for isInSafeMode");
  }
  catch ( IOException e) {
    // Anything other than a RemoteException is an unexpected failure.
    if (!(e instanceof RemoteException)) {
      throw e;
    }
    IOException unwrapped=((RemoteException)e).unwrapRemoteException();
    assertTrue("StandBy nn should not support isInSafeMode",unwrapped instanceof StandbyException);
  }
  finally {
    if (null != standbyDfs) {
      standbyDfs.close();
    }
  }
  // Fail over and put the new active into safemode: a client going through
  // the failover proxy must observe the ACTIVE NN's safemode state.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
  DistributedFileSystem failoverFs=(DistributedFileSystem)fs;
  assertTrue("ANN should be in SafeMode",failoverFs.isInSafeMode());
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
  assertFalse("ANN should be out of SafeMode",failoverFs.isInSafeMode());
}
InternalCallVerifier BooleanVerifier
/**
 * Test case for enter safemode in active namenode, when it is already in startup safemode.
 * It is a regression test for HDFS-2747.
 */
@Test public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil.createFile(fs,new Path("/test"),3 * BLOCK_SIZE,(short)3,1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FSNamesystem annNamesystem=nn0.getNamesystem();
  String safemodeStatus=annNamesystem.getSafemode();
  assertTrue("Bad safemode status: '" + safemodeStatus + "'",safemodeStatus.startsWith("Safe mode is ON."));
  // Re-entering safemode while already in startup safemode must be
  // idempotent and must not throw an NPE (HDFS-2747); exercise it twice.
  for (int attempt=0; attempt < 2; attempt++) {
    NameNodeAdapter.enterSafeMode(nn0,false);
    assertTrue("Failed to enter into safemode in active",annNamesystem.isInSafeMode());
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Make sure the client retries when the active NN is in safemode
 */
@Test(timeout=300000) public void testClientRetrySafeMode() throws Exception {
  // Typed, synchronized result map (was a raw Map): path -> mkdir outcome,
  // written by the background thread and read under `this` lock below.
  final Map<Path,Boolean> results=Collections.synchronizedMap(new HashMap<Path,Boolean>());
  final Path test=new Path("/test");
  NameNodeAdapter.enterSafeMode(nn0,false);
  // Stretch the safemode extension so the NN stays in safemode until we
  // explicitly leave it below.
  SafeModeInfo safeMode=(SafeModeInfo)Whitebox.getInternalState(nn0.getNamesystem(),"safeMode");
  Whitebox.setInternalState(safeMode,"extension",Integer.valueOf(30000));
  LOG.info("enter safemode");
  // The mkdir should block (client retry) until the NN leaves safemode.
  new Thread(){
    @Override public void run(){
      try {
        boolean mkdir=fs.mkdirs(test);
        LOG.info("mkdir finished, result is " + mkdir);
        synchronized (TestHASafeMode.this) {
          results.put(test,mkdir);
          TestHASafeMode.this.notifyAll();
        }
      }
      catch ( Exception e) {
        LOG.info("Got Exception while calling mkdir",e);
      }
    }
  }
  .start();
  // While in safemode the mkdir must not have gone through yet.
  assertFalse("The directory should not be created while NN in safemode",fs.exists(test));
  Thread.sleep(1000);
  NameNodeAdapter.leaveSafeMode(nn0);
  LOG.info("leave safemode");
  // Wait for the background thread to record its (retried) mkdir result.
  synchronized (this) {
    while (!results.containsKey(test)) {
      this.wait();
    }
    assertTrue(results.get(test));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure that when we transition to active in safe mode that we don't
 * prematurely consider blocks missing just because not all DNs have reported
 * yet.
 * This is a regression test for HDFS-3921.
 */
@Test public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode() throws IOException {
DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
// Stop one DN and restart NN0 without waiting for it to exit safemode,
// then immediately make it active while still in startup safemode.
cluster.stopDataNode(1);
cluster.restartNameNode(0,false);
cluster.transitionToActive(0);
// Still in startup safemode: no blocks may be counted as missing yet,
// even though one DN has not reported.
assertTrue(cluster.getNameNode(0).isInSafeMode());
assertEquals(0,cluster.getNamesystem(0).getMissingBlocksCount());
}
InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-2804: standby should not populate replication
 * queues when exiting safe mode.
 */
@Test public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
  DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
  HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
  // Checkpoint on the standby so its restart starts from a recent image.
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
  NameNodeAdapter.saveNamespace(nn1);
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
  DFSTestUtil.createFile(fs,new Path("/test2"),15 * BLOCK_SIZE,(short)3,1L);
  nn0.getRpcServer().rollEditLog();
  cluster.stopDataNode(1);
  cluster.shutdownNameNode(1);
  cluster.restartNameNode(1,false);
  nn1=cluster.getNameNode(1);
  // Wait (poll every 100ms, up to 10s) for the restarted standby to leave
  // startup safemode. Typed Supplier (was a raw type).
  GenericTestUtils.waitFor(new Supplier<Boolean>(){
    @Override public Boolean get(){
      return !nn1.isInSafeMode();
    }
  }
  ,100,10000);
  // Despite the stopped DN, the standby must not have queued replication work.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  assertEquals(0L,nn1.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0L,nn1.getNamesystem().getPendingReplicationBlocks());
}
InternalCallVerifier BooleanVerifier
/**
 * Test NN crash and client crash/stuck immediately after block allocation
 */
@Test(timeout=100000) public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
// Require 100% of blocks reported before leaving safemode on restart.
cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"1.0f");
String testData="testData";
cluster.getConfiguration(0).setInt("io.bytes.per.checksum",testData.length());
cluster.restartNameNode(0);
try {
cluster.waitActive();
cluster.transitionToActive(0);
cluster.transitionToStandby(1);
DistributedFileSystem dfs=cluster.getFileSystem(0);
String pathString="/tmp1.txt";
Path filePath=new Path(pathString);
FSDataOutputStream create=dfs.create(filePath,FsPermission.getDefault(),true,1024,(short)3,testData.length(),null);
create.write(testData.getBytes());
create.hflush();
long fileId=((DFSOutputStream)create.getWrappedStream()).getFileId();
FileStatus fileStatus=dfs.getFileStatus(filePath);
DFSClient client=DFSClientAdapter.getClient(dfs);
ExtendedBlock previousBlock=DFSClientAdapter.getPreviousBlock(client,fileId);
// Allocate a new block via the raw namenode protocol, simulating a client
// that crashes/stalls right after addBlock (no data is ever written to it).
DFSClientAdapter.getNamenode(client).addBlock(pathString,client.getClientName(),new ExtendedBlock(previousBlock),new DatanodeInfo[0],DFSClientAdapter.getFileId((DFSOutputStream)create.getWrappedStream()),null);
// NN also "crashes" (restart) before the new block is used.
cluster.restartNameNode(0,true);
cluster.restartDataNode(0);
cluster.transitionToActive(0);
Thread.sleep(2000);
// After restart the file must still be readable and its lease recoverable.
FSDataInputStream is=dfs.open(filePath);
is.close();
dfs.recoverLease(filePath);
assertTrue("Recovery also should be success",dfs.recoverLease(filePath));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test that delegation tokens continue to work after the failover.
 */
@Test public void testDelegationTokensAfterFailover() throws IOException {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
String renewer=UserGroupInformation.getLoginUser().getUserName();
// NOTE(review): raw Token type — presumably Token<DelegationTokenIdentifier>;
// confirm against the imports and parameterize.
Token token=nn1.getRpcServer().getDelegationToken(new Text(renewer));
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// A token issued by the old active must be renewable and cancellable on
// the new active, which must also be able to issue fresh tokens.
nn2.getRpcServer().renewDelegationToken(token);
nn2.getRpcServer().cancelDelegationToken(token);
token=nn2.getRpcServer().getDelegationToken(new Text(renewer));
Assert.assertTrue(token != null);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test for HDFS-2812. Since lease renewals go from the client
 * only to the active NN, the SBN will have out-of-date lease
 * info when it becomes active. We need to make sure we don't
 * accidentally mark the leases as expired when the failover
 * proceeds.
 */
@Test(timeout=120000) public void testLeasesRenewedOnTransition() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
FSDataOutputStream stm=null;
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
try {
cluster.waitActive();
cluster.transitionToActive(0);
LOG.info("Starting with NN 0 active");
stm=fs.create(TEST_FILE_PATH);
// The active NN has a lease for the open file; the standby has none yet
// (getLeaseRenewalTime returns -1 when no lease exists).
long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR);
assertTrue(nn0t0 > 0);
long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertEquals("Lease should not yet exist on nn1",-1,nn1t0);
Thread.sleep(5);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0);
Thread.sleep(5);
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
// The failover itself must renew the lease rather than let it expire.
long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test which takes a single node and flip flops between
 * active and standby mode, making sure it doesn't
 * double-play any edits.
 */
@Test public void testTransitionActiveToStandby() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    FileSystem fileSys=cluster.getFileSystem(0);
    fileSys.mkdirs(TEST_DIR);
    // Once the NN drops to standby, any mutation must be refused.
    cluster.transitionToStandby(0);
    try {
      fileSys.mkdirs(new Path("/x"));
      fail("Didn't throw trying to mutate FS in standby state");
    }
    catch ( Throwable t) {
      GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
    }
    // Back to active: create a file under TEST_DIR, then delete the dir.
    cluster.transitionToActive(0);
    DFSTestUtil.createFile(fileSys,new Path(TEST_DIR,"foo"),10,(short)1,1L);
    fileSys.delete(TEST_DIR,true);
    // Flip to standby and back again; replaying edits must not resurrect
    // the deleted directory (no double-played edits).
    cluster.transitionToStandby(0);
    cluster.transitionToActive(0);
    assertFalse(fileSys.exists(TEST_DIR));
  }
  finally {
    cluster.shutdown();
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * The secret manager needs to start/stop - the invariant should be that
 * the secret manager runs if and only if the NN is active and not in
 * safe mode. As a state diagram, we need to test all of the following
 * transitions to make sure the secret manager is started when we transition
 * into state 4, but none of the others.
 *
 * SafeMode Not SafeMode
 * Standby 1 <------> 2
 * ^ ^
 * | |
 * v v
 * Active 3 <------> 4
 *
 */
@Test(timeout=60000) public void testSecretManagerState() throws Exception {
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY,true);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY,50);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).waitSafeMode(false).build();
try {
cluster.transitionToActive(0);
DFSTestUtil.createFile(cluster.getFileSystem(0),TEST_FILE_PATH,6000,(short)1,1L);
// A long safemode extension keeps the restarted NN in startup safemode,
// placing it in state 1 (standby + safemode).
cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,60000);
cluster.restartNameNode(0);
NameNode nn=cluster.getNameNode(0);
banner("Started in state 1.");
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->2. Should not start secret manager");
NameNodeAdapter.leaveSafeMode(nn);
assertTrue(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 2->1. Should not start secret manager.");
NameNodeAdapter.enterSafeMode(nn,false);
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->3. Should not start secret manager.");
nn.getRpcServer().transitionToActive(REQ_INFO);
assertFalse(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 3->1. Should not start secret manager.");
nn.getRpcServer().transitionToStandby(REQ_INFO);
assertTrue(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 1->3->4. Should start secret manager.");
nn.getRpcServer().transitionToActive(REQ_INFO);
NameNodeAdapter.leaveSafeMode(nn);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
banner("Transition 4->3. Should stop secret manager");
NameNodeAdapter.enterSafeMode(nn,false);
assertFalse(nn.isStandbyState());
assertTrue(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 3->4. Should start secret manager");
NameNodeAdapter.leaveSafeMode(nn);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
// Bounce between states 4 and 2 repeatedly: the secret manager must stop
// on every drop to standby and restart on every promotion to active.
for (int i=0; i < 20; i++) {
banner("Transition 4->2. Should stop secret manager.");
nn.getRpcServer().transitionToStandby(REQ_INFO);
assertTrue(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertFalse(isDTRunning(nn));
banner("Transition 2->4. Should start secret manager");
nn.getRpcServer().transitionToActive(REQ_INFO);
assertFalse(nn.isStandbyState());
assertFalse(nn.isInSafeMode());
assertTrue(isDTRunning(nn));
}
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * End-to-end check of initializeSharedEdits: before initialization the NNs
 * cannot start; after it they can. Repeated after wiping the shared dir to
 * prove re-initialization works as well.
 */
@Test public void testInitializeSharedEdits() throws Exception {
assertCannotStartNameNodes();
// A false return followed by startable NNs indicates the shared edits dir
// was initialized without error.
assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
assertCanStartHaNameNodes("1");
// Wipe the shared edits dir and run the same cycle a second time.
shutdownClusterAndRemoveSharedEditsDir();
assertCannotStartNameNodes();
assertFalse(NameNode.initializeSharedEdits(cluster.getConfiguration(0)));
assertCanStartHaNameNodes("2");
}
InternalCallVerifier NullVerifier
/**
 * initializeSharedEdits must populate the generic (un-suffixed) conf keys
 * from the nameservice-suffixed ones.
 */
@Test public void testInitializeSharedEditsConfiguresGenericConfKeys() throws IOException {
  Configuration conf=new Configuration();
  final String nsId="ns1";
  // HA nameservice with two NNs; only nn1 gets an explicit RPC address.
  conf.set(DFSConfigKeys.DFS_NAMESERVICES,nsId);
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,nsId),"nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,nsId,"nn1"),"localhost:1234");
  // The generic key starts unset and must be filled in by the call below.
  assertNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
  NameNode.initializeSharedEdits(conf);
  assertNotNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
}
InternalCallVerifier BooleanVerifier
/**
 * initializeSharedEdits must report failure (true) when the configuration
 * has no shared edits directory at all.
 */
@Test public void testFailWhenNoSharedEditsSpecified() throws Exception {
  // Clone the test config and strip the shared-edits key from the copy.
  Configuration badConf=new Configuration(conf);
  badConf.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  assertFalse(NameNode.initializeSharedEdits(badConf,true));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a queued pending-DN message is discarded when the datanode it
 * came from re-registers with a different storage ID (wiped and restarted),
 * so the later failover does not act on stale state.
 */
@Test public void testChangedStorageId() throws IOException, URISyntaxException, InterruptedException {
HdfsConfiguration conf=new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).nnTopology(MiniDFSNNTopology.simpleHATopology()).build();
try {
cluster.transitionToActive(0);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
OutputStream out=fs.create(filePath);
out.write("foo bar baz".getBytes());
out.close();
HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),cluster.getNameNode(1));
// Forge a bogus generation stamp so the DN's next report mismatches and
// gets queued on the standby as a pending DN message.
ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,filePath);
assertTrue(MiniDFSCluster.changeGenStampOfBlock(0,block,900));
DataNodeProperties dnProps=cluster.stopDataNode(0);
cluster.restartNameNode(1,false);
assertTrue(cluster.restartDataNode(dnProps,true));
// Busy-wait until the mismatched report is queued on the standby.
while (cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount() < 1) {
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
}
assertEquals(1,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount());
String oldStorageId=getRegisteredDatanodeUid(cluster,1);
// Wipe the DN's storage so it re-registers under a brand-new storage ID.
assertTrue(wipeAndRestartDn(cluster,0));
String newStorageId="";
do {
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
newStorageId=getRegisteredDatanodeUid(cluster,1);
System.out.println("====> oldStorageId: " + oldStorageId + " newStorageId: "+ newStorageId);
}
while (newStorageId.equals(oldStorageId));
// The stale queued message from the old storage ID must have been purged.
assertEquals(0,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount());
// Failover should now succeed with no leftover pending messages.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Test the scenario where the NN fails over after issuing a block
 * synchronization request, but before it is committed. The
 * DN running the recovery should then fail to commit the synchronization
 * and a later retry will succeed.
 */
@Test(timeout=30000) public void testFailoverRightBeforeCommitSynchronization() throws Exception {
final Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
FSDataOutputStream stm=null;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
Thread.sleep(500);
LOG.info("Starting with NN 0 active");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
stm=fs.create(TEST_PATH);
// Write half a block and hflush so there is an under-construction block.
AppendTestUtil.write(stm,0,BLOCK_SIZE / 2);
stm.hflush();
NameNode nn0=cluster.getNameNode(0);
ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,TEST_PATH);
DatanodeDescriptor expectedPrimary=DFSTestUtil.getExpectedPrimaryNode(nn0,blk);
LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
// Spy on the primary DN's NN proxy and delay its commitBlockSynchronization
// so the failover can be slipped in between issue and commit.
DataNode primaryDN=cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy=DataNodeTestUtils.spyOnBposToNN(primaryDN,nn0);
DelayAnswer delayer=new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(Mockito.eq(blk),Mockito.anyInt(),Mockito.anyLong(),Mockito.eq(true),Mockito.eq(false),(DatanodeID[])Mockito.anyObject(),(String[])Mockito.anyObject());
DistributedFileSystem fsOtherUser=createFsAsOtherUser(cluster,conf);
// recoverLease returns false: recovery was started but is not complete.
assertFalse(fsOtherUser.recoverLease(TEST_PATH));
LOG.info("Waiting for commitBlockSynchronization call from primary");
delayer.waitForCall();
// Fail over while commitBlockSynchronization is held up by the delayer.
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
delayer.proceed();
delayer.waitForResult();
// The delayed commit must have failed against the now-standby NN0.
Throwable t=delayer.getThrown();
if (t == null) {
fail("commitBlockSynchronization call did not fail on standby");
}
GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported",t);
// A subsequent retry against the new active must succeed.
loopRecoverLease(fsOtherUser,TEST_PATH);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_SIZE / 2);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests lease recovery if a client crashes. This approximates the
 * use case of HBase WALs being recovered after a NN failover.
 */
@Test(timeout=30000) public void testLeaseRecoveryAfterFailover() throws Exception {
final Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE);
FSDataOutputStream stm=null;
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
Thread.sleep(500);
LOG.info("Starting with NN 0 active");
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
// Leave the file open (hflushed but never closed) to simulate a crashed
// writer holding the lease.
stm=fs.create(TEST_PATH);
AppendTestUtil.write(stm,0,BLOCK_AND_A_HALF);
stm.hflush();
LOG.info("Failing over to NN 1");
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
assertTrue(fs.exists(TEST_PATH));
// A different user recovers the lease on the new active NN.
FileSystem fsOtherUser=createFsAsOtherUser(cluster,conf);
loopRecoverLease(fsOtherUser,TEST_PATH);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_AND_A_HALF);
// Fail back and verify the recovered file is intact on the original NN too.
cluster.transitionToStandby(1);
cluster.transitionToActive(0);
AppendTestUtil.check(fs,TEST_PATH,BLOCK_AND_A_HALF);
}
finally {
IOUtils.closeStream(stm);
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Test that quotas are properly tracked by the standby through
 * create, append, delete.
 */
@Test(timeout=60000) public void testQuotasTrackedOnStandby() throws Exception {
  fs.mkdirs(TEST_DIR);
  DistributedFileSystem dfs=(DistributedFileSystem)fs;
  dfs.setQuota(TEST_DIR,NS_QUOTA,DS_QUOTA);
  long expectedSize=3 * BLOCK_SIZE + BLOCK_SIZE / 2;
  // Create: standby must account the new file against the quota.
  DFSTestUtil.createFile(fs,TEST_FILE,expectedSize,(short)1,1L);
  HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
  assertStandbyContentSummary(expectedSize,1);
  // Append: consumed space grows by the appended bytes.
  FSDataOutputStream stm=fs.append(TEST_FILE);
  try {
    byte[] data=new byte[(int)(BLOCK_SIZE * 3 / 2)];
    stm.write(data);
    expectedSize+=data.length;
  }
  finally {
    IOUtils.closeStream(stm);
  }
  HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
  assertStandbyContentSummary(expectedSize,1);
  // Delete: consumed space and file count drop back to zero.
  fs.delete(TEST_FILE,true);
  expectedSize=0;
  HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
  assertStandbyContentSummary(expectedSize,0);
}
/**
 * Asserts the standby's content summary for TEST_DIR: quotas unchanged,
 * the given consumed space and file count, and exactly one directory.
 */
private void assertStandbyContentSummary(long expectedSpaceConsumed,int expectedFileCount) throws Exception {
  ContentSummary cs=nn1.getRpcServer().getContentSummary(TEST_DIR_STR);
  assertEquals(NS_QUOTA,cs.getQuota());
  assertEquals(DS_QUOTA,cs.getSpaceQuota());
  assertEquals(expectedSpaceConsumed,cs.getSpaceConsumed());
  assertEquals(1,cs.getDirectoryCount());
  assertEquals(expectedFileCount,cs.getFileCount());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * 1. Run a set of operations
 * 2. Trigger the NN failover
 * 3. Check the retry cache on the original standby NN
 */
@Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception {
  DFSTestUtil.runOperations(cluster,dfs,conf,BlockSize,0);
  FSNamesystem fsn0=cluster.getNamesystem(0);
  // Typed cache/map/iterator (were raw types). Snapshot every retry-cache
  // entry on the active before the failover.
  LightWeightCache<CacheEntry,CacheEntry> cacheSet=(LightWeightCache<CacheEntry,CacheEntry>)fsn0.getRetryCache().getCacheSet();
  assertEquals(23,cacheSet.size());
  Map<CacheEntry,CacheEntry> oldEntries=new HashMap<CacheEntry,CacheEntry>();
  Iterator<CacheEntry> iter=cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry=iter.next();
    oldEntries.put(entry,entry);
  }
  // Roll and tail edits so the standby sees the ops, then fail over to it.
  cluster.getNameNode(0).getRpcServer().rollEditLog();
  cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  // The new active must have rebuilt an identical retry cache from edits.
  FSNamesystem fsn1=cluster.getNamesystem(1);
  cacheSet=(LightWeightCache<CacheEntry,CacheEntry>)fsn1.getRetryCache().getCacheSet();
  assertEquals(23,cacheSet.size());
  iter=cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry=iter.next();
    assertTrue(oldEntries.containsKey(entry));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Verify that a standby namenode never accumulates pending block deletions:
 * deletes processed on the active must not leave invalidation work queued
 * on the standby, even after heartbeats and block reports.
 */
@Test(timeout=60000) public void testInvalidateBlock() throws Exception {
Configuration conf=new Configuration();
HAUtil.setAllowStandbyReads(conf,true);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
try {
cluster.waitActive();
cluster.transitionToActive(0);
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
Thread.sleep(1000);
LOG.info("==================================");
DFSTestUtil.writeFile(fs,TEST_FILE_PATH,TEST_FILE_DATA);
// Roll so the standby tails the file creation before the delete below.
nn1.getRpcServer().rollEditLog();
LOG.info("==================================");
fs.delete(TEST_FILE_PATH,false);
// Compute the invalidation work on the active, then roll again so the
// standby tails the OP_DELETE as well.
BlockManagerTestUtil.computeAllPendingWork(nn1.getNamesystem().getBlockManager());
nn1.getRpcServer().rollEditLog();
assertEquals(0,nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
cluster.triggerHeartbeats();
cluster.triggerBlockReports();
// Still zero after DN traffic: the standby must not queue deletions itself.
assertEquals(0,nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Verify that read operations (e.g. JMX queries) are still served by the
 * standby while it is in the middle of saving a checkpoint, i.e. the
 * checkpointer must not hold the FSNamesystem write lock for the duration.
 */
@Test(timeout=300000) public void testReadsAllowedDuringCheckpoint() throws Exception {
// Intercept saveNamespace on the standby's FSImage so the checkpoint can
// be held mid-flight by the DelayAnswer.
FSImage spyImage1=NameNodeAdapter.spyOnFsImage(nn1);
DelayAnswer answerer=new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class),Mockito.any(NameNodeFile.class),Mockito.any(Canceler.class));
doEdits(0,1000);
nn0.getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue("SBN is not performing checkpoint but it should be.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// Issue a write RPC from another thread; it should queue behind the
// checkpoint's long read lock rather than the write lock.
Thread t=new Thread(){
@Override public void run(){
try {
nn1.getRpcServer().restoreFailedStorage("false");
}
catch ( IOException e) {
e.printStackTrace();
}
}
}
;
t.start();
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// The checkpoint holds the long read lock, not the write lock.
assertFalse(nn1.getNamesystem().getFsLockForTests().hasQueuedThreads());
assertFalse(nn1.getNamesystem().getFsLockForTests().isWriteLocked());
assertTrue(nn1.getNamesystem().getLongReadLockForTests().hasQueuedThreads());
// Reads (the JMX page) must still succeed while checkpointing.
String pageContents=DFSTestUtil.urlGet(new URL("http://" + nn1.getHttpAddress().getHostName() + ":"+ nn1.getHttpAddress().getPort()+ "/jmx"));
assertTrue(pageContents.contains("NumLiveDataNodes"));
assertTrue("SBN should have still been checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
answerer.proceed();
answerer.waitForResult();
assertTrue("SBN should have finished checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
t.join();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Make sure that clients will receive StandbyExceptions even when a
 * checkpoint is in progress on the SBN, and therefore the StandbyCheckpointer
 * thread will have FSNS lock. Regression test for HDFS-4591.
 */
@Test(timeout=300000) public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
FSImage spyImage1=NameNodeAdapter.spyOnFsImage(nn1);
// DelayAnswer parks saveNamespace so the SBN stays mid-checkpoint until we
// call proceed() below.
DelayAnswer answerer=new DelayAnswer(LOG);
Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class),Mockito.eq(NameNodeFile.IMAGE),Mockito.any(Canceler.class));
doEdits(0,1000);
// Rolling the active's edit log triggers the standby checkpoint.
nn0.getRpcServer().rollEditLog();
answerer.waitForCall();
assertTrue("SBN is not performing checkpoint but it should be.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
// A client RPC against the standby must fail fast with StandbyException
// instead of hanging behind the checkpoint's lock.
try {
nn1.getRpcServer().getFileInfo("/");
fail("Should have thrown StandbyException, but instead succeeded.");
}
catch ( StandbyException se) {
GenericTestUtils.assertExceptionContains("is not supported",se);
}
assertTrue("SBN should have still been checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
// Unblock the checkpoint and confirm it completes.
answerer.proceed();
answerer.waitForResult();
assertTrue("SBN should have finished checkpointing.",answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
InternalCallVerifier EqualityVerifier
/**
 * Test cancellation of ongoing checkpoints when failover happens
 * mid-checkpoint during image upload from standby to active NN.
 */
@Test(timeout=60000) public void testCheckpointCancellationDuringUpload() throws Exception {
// Disable image compression and throttle the transfer rate so the image
// upload is slow enough to still be in flight when we fail over below.
cluster.getConfiguration(0).setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false);
cluster.getConfiguration(1).setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false);
cluster.getConfiguration(1).setLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY,100);
cluster.restartNameNode(0);
cluster.restartNameNode(1);
nn0=cluster.getNameNode(0);
nn1=cluster.getNameNode(1);
cluster.transitionToActive(0);
doEdits(0,100);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(104));
// Fail over while the standby's (throttled) image upload is in progress.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
cluster.shutdown();
cluster=null;
// Wait until every TransferFsImageUpload thread has exited, proving the
// upload was actually cancelled rather than left running in the background.
// (Fixed: raw Supplier parameterized as Supplier<Boolean>.)
GenericTestUtils.waitFor(new Supplier<Boolean>(){
@Override public Boolean get(){
ThreadMXBean threadBean=ManagementFactory.getThreadMXBean();
ThreadInfo[] threads=threadBean.getThreadInfo(threadBean.getAllThreadIds(),1);
for ( ThreadInfo thread : threads) {
if (thread.getThreadName().startsWith("TransferFsImageUpload")) {
return false;
}
}
return true;
}
}
,1000,30000);
// The cancelled upload must not have registered a checkpoint on nn0.
assertEquals(0,nn0.getFSImage().getMostRecentCheckpointTxId());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Regression test for HDFS-2795:
* - Start an HA cluster with a DN.
* - Write several blocks to the FS with replication 1.
* - Shutdown the DN
* - Wait for the NNs to declare the DN dead. All blocks will be under-replicated.
* - Restart the DN.
* In the bug, the standby node would only very slowly notice the blocks returning
* to the cluster.
*/
@Test(timeout=60000) public void testDatanodeRestarts() throws Exception {
Configuration conf=new Configuration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024);
HAUtil.setAllowStandbyReads(conf,true);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,0);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
try {
NameNode nn0=cluster.getNameNode(0);
NameNode nn1=cluster.getNameNode(1);
cluster.transitionToActive(0);
DFSTestUtil.createFile(cluster.getFileSystem(0),TEST_FILE_PATH,5 * 1024,(short)1,1L);
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
DataNode dn=cluster.getDataNodes().get(0);
String dnName=dn.getDatanodeId().getXferAddr();
DataNodeProperties dnProps=cluster.stopDataNode(0);
BlockManagerTestUtil.noticeDeadDatanode(nn0,dnName);
BlockManagerTestUtil.noticeDeadDatanode(nn1,dnName);
BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
assertEquals(5,nn0.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks());
LocatedBlocks locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1);
assertEquals("Standby should have registered that the block has no replicas",0,locs.get(0).getLocations().length);
cluster.restartDataNode(dnProps);
cluster.waitActive(0);
cluster.waitActive(1);
BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager());
BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
assertEquals(0,nn0.getNamesystem().getUnderReplicatedBlocks());
assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks());
locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1);
assertEquals("Standby should have registered that the block has replicas again",1,locs.get(0).getLocations().length);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test that xattrs are properly tracked by the standby: set two xattrs on the
 * active, verify the standby sees them via its RPC server, then fail over and
 * verify the xattrs (names and values) survive the transition.
 */
@Test(timeout=60000) public void testXAttrsTrackedOnStandby() throws Exception {
fs.create(path).close();
fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE));
HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
// Query the standby directly: both xattrs must already be visible there.
List<?> xAttrs=nn1.getRpcServer().getXAttrs("/file",null);
assertEquals(2,xAttrs.size());
// Fail over to nn1. (Fixed: the original shut nn0 down twice — a duplicated
// line — instead of shutting down nn0 and then activating nn1.)
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
// After failover the client still sees both xattrs with intact values.
// (Fixed: assertEquals arguments were in (actual, expected) order.)
Map<String,byte[]> xattrs=fs.getXAttrs(path);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
fs.delete(path,true);
}
InternalCallVerifier EqualityVerifier
/**
 * Snapshots must freeze the ACLs of the files/subdirs they capture: after the
 * live copies' ACLs are removed, the snapshot paths still enforce the original
 * ACL (bruce allowed, diana denied), across restarts with and without a
 * saved namespace image.
 */
@Test public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval() throws Exception {
Path filePath=new Path(path,"file1");
Path subdirPath=new Path(path,"subdir1");
Path fileSnapshotPath=new Path(snapshotPath,"file1");
Path subdirSnapshotPath=new Path(snapshotPath,"subdir1");
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close();
FileSystem.mkdirs(hdfs,subdirPath,FsPermission.createImmutable((short)0700));
// Grant bruce read/execute on both file and subdir; deny group/other.
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE));
hdfs.setAcl(filePath,aclSpec);
hdfs.setAcl(subdirPath,aclSpec);
assertFilePermissionGranted(fsAsBruce,BRUCE,filePath);
assertFilePermissionDenied(fsAsDiana,DIANA,filePath);
assertDirPermissionGranted(fsAsBruce,BRUCE,subdirPath);
assertDirPermissionDenied(fsAsDiana,DIANA,subdirPath);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Both the live paths and their snapshot copies report the same ACL and
// the 010550 mode (ACL bit + r-xr-x---).
AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)};
AclStatus s=hdfs.getAclStatus(filePath);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,filePath);
s=hdfs.getAclStatus(subdirPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,subdirPath);
s=hdfs.getAclStatus(fileSnapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce,BRUCE,fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana,DIANA,fileSnapshotPath);
s=hdfs.getAclStatus(subdirSnapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce,BRUCE,subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana,DIANA,subdirSnapshotPath);
// Remove the live ACLs; the snapshot view must be unaffected. Re-check
// after a restart without and with a saved namespace image.
hdfs.removeAcl(filePath);
hdfs.removeAcl(subdirPath);
doSnapshotContentsRemovalAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath);
restart(false);
doSnapshotContentsRemovalAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath);
restart(true);
doSnapshotContentsRemovalAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * Modifying an ACL consumes namespace quota; once the snapshottable
 * directory's quota is exhausted, a further modifyAclEntries must fail with
 * NSQuotaExceededException while the already-set ACL remains intact.
 */
@Test public void testChangeAclExceedsQuota() throws Exception {
Path filePath=new Path(path,"file1");
Path fileSnapshotPath=new Path(snapshotPath,"file1");
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
// A namespace quota of 3 leaves no headroom once dir + file + snapshot
// state are accounted for.
hdfs.setQuota(path,3,HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close();
hdfs.setPermission(filePath,FsPermission.createImmutable((short)0600));
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bruce",READ_WRITE));
hdfs.modifyAclEntries(filePath,aclSpec);
hdfs.createSnapshot(path,snapshotName);
AclStatus s=hdfs.getAclStatus(filePath);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_WRITE),aclEntry(ACCESS,GROUP,NONE)},returned);
assertPermission((short)010660,filePath);
s=hdfs.getAclStatus(fileSnapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_WRITE),aclEntry(ACCESS,GROUP,NONE)},returned);
// Fixed copy/paste bug: this check targets the snapshot copy, which must
// carry the same permission — the original re-checked filePath.
assertPermission((short)010660,fileSnapshotPath);
// Any further ACL change must now blow the namespace quota.
aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bruce",READ));
exception.expect(NSQuotaExceededException.class);
hdfs.modifyAclEntries(filePath,aclSpec);
}
InternalCallVerifier EqualityVerifier
/**
 * A snapshot of a directory must keep enforcing the ACL that was in force
 * when it was taken, even after the live directory's ACL is removed; verified
 * across restarts both without and with a saved namespace image.
 */
@Test public void testOriginalAclEnforcedForSnapshotRootAfterRemoval() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
// bruce gets read/execute; group/other denied.
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE));
hdfs.setAcl(path,aclSpec);
assertDirPermissionGranted(fsAsBruce,BRUCE,path);
assertDirPermissionDenied(fsAsDiana,DIANA,path);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Live dir and snapshot root both report the ACL and mode 010750
// (ACL bit + rwxr-x---).
AclStatus s=hdfs.getAclStatus(path);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned);
assertPermission((short)010750,path);
s=hdfs.getAclStatus(snapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned);
assertPermission((short)010750,snapshotPath);
assertDirPermissionGranted(fsAsBruce,BRUCE,snapshotPath);
assertDirPermissionDenied(fsAsDiana,DIANA,snapshotPath);
// Remove the live ACL; the snapshot must keep the old enforcement.
hdfs.removeAcl(path);
doSnapshotRootRemovalAssertions(path,snapshotPath);
restart(false);
doSnapshotRootRemovalAssertions(path,snapshotPath);
restart(true);
doSnapshotRootRemovalAssertions(path,snapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * removeAcl must operate on the directory's current state, not the snapshot:
 * after adding an entry for bruce and then removing the whole ACL, the live
 * directory reports no entries, the plain 0700 mode is back in force, and
 * neither bruce nor diana may access it.
 */
@Test public void testRemoveReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Grant bruce full access, then immediately revoke the entire ACL.
List bruceSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bruce",ALL));
hdfs.modifyAclEntries(path,bruceSpec);
hdfs.removeAcl(path);
// Current state: no ACL entries and the original 0700 mode.
AclStatus status=hdfs.getAclStatus(path);
AclEntry[] actual=status.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{},actual);
assertPermission((short)0700,path);
assertDirPermissionDenied(fsAsBruce,BRUCE,path);
assertDirPermissionDenied(fsAsDiana,DIANA,path);
}
InternalCallVerifier EqualityVerifier
/**
 * A snapshot of a directory must keep enforcing the ACL that was in force
 * when it was taken, even after the live directory's ACL is changed to a
 * different one; verified across restarts both without and with a saved
 * namespace image.
 */
@Test public void testOriginalAclEnforcedForSnapshotRootAfterChange() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
// Initial ACL: bruce gets read/execute; group/other denied.
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE));
hdfs.setAcl(path,aclSpec);
assertDirPermissionGranted(fsAsBruce,BRUCE,path);
assertDirPermissionDenied(fsAsDiana,DIANA,path);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Live dir and snapshot root both report the ACL and mode 010750.
AclStatus s=hdfs.getAclStatus(path);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned);
assertPermission((short)010750,path);
s=hdfs.getAclStatus(snapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned);
assertPermission((short)010750,snapshotPath);
assertDirPermissionGranted(fsAsBruce,BRUCE,snapshotPath);
assertDirPermissionDenied(fsAsDiana,DIANA,snapshotPath);
// Replace the live ACL (now favoring diana); the snapshot must keep the
// old bruce-favoring enforcement.
aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"diana",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE));
hdfs.setAcl(path,aclSpec);
doSnapshotRootChangeAssertions(path,snapshotPath);
restart(false);
doSnapshotRootChangeAssertions(path,snapshotPath);
restart(true);
doSnapshotRootChangeAssertions(path,snapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * A directory's DEFAULT ACL entries must be copied into a snapshot as DEFAULT
 * entries only — they must not be materialized as ACCESS entries on the
 * snapshot, so bruce still has no access to the snapshot root.
 */
@Test public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"bruce",READ_EXECUTE));
hdfs.modifyAclEntries(path,aclSpec);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// The live dir carries the auto-completed DEFAULT entry set (user, named
// user, group, mask, other) and no ACCESS entries.
AclStatus s=hdfs.getAclStatus(path);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",READ_EXECUTE),aclEntry(DEFAULT,GROUP,NONE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned);
assertPermission((short)010700,path);
// The snapshot reports the identical DEFAULT-only entry set.
s=hdfs.getAclStatus(snapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",READ_EXECUTE),aclEntry(DEFAULT,GROUP,NONE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned);
assertPermission((short)010700,snapshotPath);
// DEFAULT entries grant nothing directly: bruce is still denied.
assertDirPermissionDenied(fsAsBruce,BRUCE,snapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * Successive modifyAclEntries calls must compose against the directory's
 * current ACL (not the snapshot's): both bruce's and diana's entries end up
 * in the live ACL and both users gain access.
 */
@Test public void testModifyReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// First grant bruce full access, then separately grant diana read/execute.
List bruceSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bruce",ALL));
hdfs.modifyAclEntries(path,bruceSpec);
List dianaSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"diana",READ_EXECUTE));
hdfs.modifyAclEntries(path,dianaSpec);
// The live ACL now carries both named-user entries plus the group entry.
AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"bruce",ALL),aclEntry(ACCESS,USER,"diana",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)};
AclStatus status=hdfs.getAclStatus(path);
AclEntry[] actual=status.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,actual);
assertPermission((short)010770,path);
assertDirPermissionGranted(fsAsBruce,BRUCE,path);
assertDirPermissionGranted(fsAsDiana,DIANA,path);
}
InternalCallVerifier EqualityVerifier
/**
 * Removing an ACL mutates the inode and therefore consumes namespace quota;
 * once the snapshottable directory's quota is exhausted, removeAcl must fail
 * with NSQuotaExceededException while the set ACL remains intact.
 */
@Test public void testRemoveAclExceedsQuota() throws Exception {
Path filePath=new Path(path,"file1");
Path fileSnapshotPath=new Path(snapshotPath,"file1");
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
// A namespace quota of 3 leaves no headroom once dir + file + snapshot
// state are accounted for.
hdfs.setQuota(path,3,HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close();
hdfs.setPermission(filePath,FsPermission.createImmutable((short)0600));
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bruce",READ_WRITE));
hdfs.modifyAclEntries(filePath,aclSpec);
hdfs.createSnapshot(path,snapshotName);
AclStatus s=hdfs.getAclStatus(filePath);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_WRITE),aclEntry(ACCESS,GROUP,NONE)},returned);
assertPermission((short)010660,filePath);
s=hdfs.getAclStatus(fileSnapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_WRITE),aclEntry(ACCESS,GROUP,NONE)},returned);
// Fixed copy/paste bug: this check targets the snapshot copy, which must
// carry the same permission — the original re-checked filePath.
// (Also dropped a dead aclSpec reassignment: removeAcl takes no spec.)
assertPermission((short)010660,fileSnapshotPath);
// Removing the ACL must now blow the namespace quota.
exception.expect(NSQuotaExceededException.class);
hdfs.removeAcl(filePath);
}
InternalCallVerifier EqualityVerifier
/**
 * getAclStatus on the ".snapshot" pseudo-directory must succeed and report an
 * empty ACL entry list rather than throwing.
 */
@Test public void testGetAclStatusDotSnapshotPath() throws Exception {
hdfs.mkdirs(path);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
Path dotSnapshotPath=new Path(path,".snapshot");
AclEntry[] entries=hdfs.getAclStatus(dotSnapshotPath).getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[]{},entries);
}
InternalCallVerifier EqualityVerifier
/**
 * Snapshots must freeze the ACLs of the files/subdirs they capture: after the
 * live copies' ACLs are replaced with a diana-favoring spec, the snapshot
 * paths still enforce the original bruce-favoring ACL, across restarts with
 * and without a saved namespace image.
 */
@Test public void testOriginalAclEnforcedForSnapshotContentsAfterChange() throws Exception {
Path filePath=new Path(path,"file1");
Path subdirPath=new Path(path,"subdir1");
Path fileSnapshotPath=new Path(snapshotPath,"file1");
Path subdirSnapshotPath=new Path(snapshotPath,"subdir1");
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close();
FileSystem.mkdirs(hdfs,subdirPath,FsPermission.createImmutable((short)0700));
// Grant bruce read/execute on both file and subdir; deny group/other.
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE));
hdfs.setAcl(filePath,aclSpec);
hdfs.setAcl(subdirPath,aclSpec);
assertFilePermissionGranted(fsAsBruce,BRUCE,filePath);
assertFilePermissionDenied(fsAsDiana,DIANA,filePath);
assertDirPermissionGranted(fsAsBruce,BRUCE,subdirPath);
assertDirPermissionDenied(fsAsDiana,DIANA,subdirPath);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
// Live paths and snapshot copies all report the same ACL and the 010550
// mode (ACL bit + r-xr-x---).
AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)};
AclStatus s=hdfs.getAclStatus(filePath);
AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,filePath);
s=hdfs.getAclStatus(subdirPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,subdirPath);
s=hdfs.getAclStatus(fileSnapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce,BRUCE,fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana,DIANA,fileSnapshotPath);
s=hdfs.getAclStatus(subdirSnapshotPath);
returned=s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected,returned);
assertPermission((short)010550,subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce,BRUCE,subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana,DIANA,subdirSnapshotPath);
// Replace the live ACLs; the snapshot view must keep the old enforcement.
// Re-check after a restart without and with a saved namespace image.
aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"diana",ALL),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE));
hdfs.setAcl(filePath,aclSpec);
hdfs.setAcl(subdirPath,aclSpec);
doSnapshotContentsChangeAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath);
restart(false);
doSnapshotContentsChangeAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath);
restart(true);
doSnapshotContentsChangeAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * Regression test for HDFS-5433 - "When reloading fsimage during
 * checkpointing, we should clear existing snapshottable directories"
 */
@Test public void testCheckpoint() throws IOException {
MiniDFSCluster cluster=null;
SecondaryNameNode secondary=null;
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
secondary=new SecondaryNameNode(conf);
SnapshotManager nnSnapshotManager=cluster.getNamesystem().getSnapshotManager();
SnapshotManager secondarySnapshotManager=secondary.getFSNamesystem().getSnapshotManager();
FileSystem fs=cluster.getFileSystem();
HdfsAdmin admin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf);
// Both NN and 2NN start with no snapshot state at all.
assertEquals(0,nnSnapshotManager.getNumSnapshots());
assertEquals(0,nnSnapshotManager.getNumSnapshottableDirs());
assertEquals(0,secondarySnapshotManager.getNumSnapshots());
assertEquals(0,secondarySnapshotManager.getNumSnapshottableDirs());
// Create a snapshottable dir plus one snapshot on the NN.
fs.mkdirs(TEST_PATH);
admin.allowSnapshot(TEST_PATH);
assertEquals(0,nnSnapshotManager.getNumSnapshots());
assertEquals(1,nnSnapshotManager.getNumSnapshottableDirs());
Path snapshotPath=fs.createSnapshot(TEST_PATH);
assertEquals(1,nnSnapshotManager.getNumSnapshots());
assertEquals(1,nnSnapshotManager.getNumSnapshottableDirs());
// The checkpoint must carry the snapshot state over to the 2NN.
secondary.doCheckpoint();
assertEquals(1,secondarySnapshotManager.getNumSnapshots());
assertEquals(1,secondarySnapshotManager.getNumSnapshottableDirs());
// Now remove all snapshot state on the NN and persist a fresh image.
fs.deleteSnapshot(TEST_PATH,snapshotPath.getName());
admin.disallowSnapshot(TEST_PATH);
assertEquals(0,nnSnapshotManager.getNumSnapshots());
assertEquals(0,nnSnapshotManager.getNumSnapshottableDirs());
NameNodeAdapter.enterSafeMode(cluster.getNameNode(),false);
NameNodeAdapter.saveNamespace(cluster.getNameNode());
NameNodeAdapter.leaveSafeMode(cluster.getNameNode());
// HDFS-5433: reloading the image during checkpoint must clear the 2NN's
// stale snapshottable-directory state rather than accumulate it.
secondary.doCheckpoint();
assertEquals(0,secondarySnapshotManager.getNumSnapshots());
assertEquals(0,secondarySnapshotManager.getNumSnapshottableDirs());
}
finally {
if (cluster != null) {
cluster.shutdown();
}
if (secondary != null) {
secondary.shutdown();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test snapshot during file appending, before the corresponding{@link FSDataOutputStream} instance closes.
 */
@Test(timeout=60000) public void testSnapshotWhileAppending() throws Exception {
Path file=new Path(dir,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
// Append a block but keep the stream open while taking snapshot s0.
HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
out.close();
// After closing, the current file spans both blocks.
INodeFile fileNode=(INodeFile)fsdir.getINode(file.toString());
assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize());
INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory();
DirectoryDiff last=dirNode.getDiffs().getLast();
// Second in-flight append: the size recorded for the latest diff must still
// be the length as of that snapshot. (Removed a dead re-fetch of dirNode
// whose result was never read before the next assignment.)
out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize(last.getSnapshotId()));
hdfs.createSnapshot(dir,"s1");
out.close();
// After s1, the file is snapshot-tracked and the s1 diff sees 3 blocks.
fileNode=(INodeFile)fsdir.getINode(file.toString());
dirNode=fsdir.getINode(dir.toString()).asDirectory();
last=dirNode.getDiffs().getLast();
assertTrue(fileNode.isWithSnapshot());
assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId()));
// A replication change plus another open append must not alter the size
// captured by the s1 diff.
hdfs.setReplication(file,(short)(REPLICATION - 1));
out=appendFileWithoutClosing(file,BLOCKSIZE);
hdfs.createSnapshot(dir,"s2");
out.close();
assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* call DFSClient#callGetBlockLocations(...) for snapshot file. Make sure only
* blocks within the size range are returned.
*/
@Test public void testGetBlockLocations() throws Exception {
final Path root=new Path("/");
final Path file=new Path("/file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
final Path fileInSnapshot=SnapshotTestHelper.getSnapshotPath(root,"s1",file.getName());
FileStatus status=hdfs.getFileStatus(fileInSnapshot);
assertEquals(BLOCKSIZE,status.getLen());
DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE - 1);
status=hdfs.getFileStatus(fileInSnapshot);
assertEquals(BLOCKSIZE,status.getLen());
status=hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 2 - 1,status.getLen());
LocatedBlocks blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot.toString(),0,Long.MAX_VALUE);
List blockList=blocks.getLocatedBlocks();
assertEquals(BLOCKSIZE,blocks.getFileLength());
assertEquals(1,blockList.size());
LocatedBlock lastBlock=blocks.getLastLocatedBlock();
assertEquals(0,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE,lastBlock.getBlockSize());
SnapshotTestHelper.createSnapshot(hdfs,root,"s2");
final Path fileInSnapshot2=SnapshotTestHelper.getSnapshotPath(root,"s2",file.getName());
HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
status=hdfs.getFileStatus(fileInSnapshot2);
assertEquals(BLOCKSIZE * 2 - 1,status.getLen());
status=hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 3 - 1,status.getLen());
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),0,Long.MAX_VALUE);
assertFalse(blocks.isUnderConstruction());
assertTrue(blocks.isLastBlockComplete());
blockList=blocks.getLocatedBlocks();
assertEquals(BLOCKSIZE * 2 - 1,blocks.getFileLength());
assertEquals(2,blockList.size());
lastBlock=blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE,lastBlock.getBlockSize());
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),BLOCKSIZE,0);
blockList=blocks.getLocatedBlocks();
assertEquals(1,blockList.size());
blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),file.toString(),0,Long.MAX_VALUE);
blockList=blocks.getLocatedBlocks();
assertEquals(3,blockList.size());
assertTrue(blocks.isUnderConstruction());
assertFalse(blocks.isLastBlockComplete());
lastBlock=blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE * 2,lastBlock.getStartOffset());
assertEquals(BLOCKSIZE - 1,lastBlock.getBlockSize());
out.close();
}
InternalCallVerifier EqualityVerifier
/**
 * Test snapshot after file appending: replication changes and appends after a
 * snapshot must be reflected in the live file's metadata.
 */
@Test(timeout=60000) public void testSnapshotAfterAppending() throws Exception {
Path file=new Path(dir,"file");
// s0 is taken while the directory is still empty.
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE);
// (Removed a dead fsdir.getINode() lookup here whose result was never read
// before being overwritten below.)
hdfs.createSnapshot(dir,"s1");
hdfs.setReplication(file,(short)(REPLICATION - 1));
DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE);
// The live inode reflects both the replication change and the third block.
INodeFile fileNode=(INodeFile)fsdir.getINode(file.toString());
assertEquals(REPLICATION - 1,fileNode.getFileReplication());
assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize());
hdfs.createSnapshot(dir,"s2");
DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE);
// Appending after s2 grows the live file to four blocks.
fileNode=(INodeFile)fsdir.getINode(file.toString());
assertEquals(REPLICATION - 1,fileNode.getFileReplication());
assertEquals(BLOCKSIZE * 4,fileNode.computeFileSize());
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test the snapshot limit of a single snapshottable directory.
* @throws Exception
*/
@Test(timeout=300000) public void testSnapshotLimit() throws Exception {
final int step=1000;
final String dirStr="/testSnapshotLimit/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
int s=0;
for (; s < SNAPSHOT_LIMIT; s++) {
final String snapshotName="s" + s;
hdfs.createSnapshot(dir,snapshotName);
if (s % step == 0) {
final Path file=new Path(dirStr,"f" + s);
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,SEED);
}
}
try {
hdfs.createSnapshot(dir,"s" + s);
Assert.fail("Expected to fail to create snapshot, but didn't.");
}
catch ( IOException ioe) {
SnapshotTestHelper.LOG.info("The exception is expected.",ioe);
}
for (int f=0; f < SNAPSHOT_LIMIT; f+=step) {
final String file="f" + f;
s=RANDOM.nextInt(step);
for (; s < SNAPSHOT_LIMIT; s+=RANDOM.nextInt(step)) {
final Path p=SnapshotTestHelper.getSnapshotPath(dir,"s" + s,file);
Assert.assertEquals(s > f,hdfs.exists(p));
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Exercises namespace-quota accounting for snapshottable directories: the
 * default snapshot name format, quota exhaustion by file creation, snapshot
 * creation and permission changes, and recovery after raising the quota.
 */
@Test(timeout=300000) public void testSnapshotWithQuota() throws Exception {
final String dirStr="/testSnapshotWithQuota/dir";
final Path dir=new Path(dirStr);
hdfs.mkdirs(dir,new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
final int NS_QUOTA=6;
hdfs.setQuota(dir,NS_QUOTA,HdfsConstants.QUOTA_DONT_SET);
final Path foo=new Path(dir,"foo");
final Path f1=new Path(foo,"f1");
DFSTestUtil.createFile(hdfs,f1,BLOCKSIZE,REPLICATION,SEED);
{
// createSnapshot without an explicit name: verify the generated
// "sYYYYMMDD-HHmmss.SSS" name and that it lands under dir/.snapshot.
final Path snapshotPath=hdfs.createSnapshot(dir);
final String snapshotName=snapshotPath.getName();
Assert.assertTrue("snapshotName=" + snapshotName,Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",snapshotName));
final Path parent=snapshotPath.getParent();
Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR,parent.getName());
Assert.assertEquals(dir,parent.getParent());
}
final Path f2=new Path(foo,"f2");
DFSTestUtil.createFile(hdfs,f2,BLOCKSIZE,REPLICATION,SEED);
// Quota is now exhausted: creating another file must fail.
try {
final Path f3=new Path(foo,"f3");
DFSTestUtil.createFile(hdfs,f3,BLOCKSIZE,REPLICATION,SEED);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// Taking a snapshot also consumes namespace and must fail.
try {
hdfs.createSnapshot(dir);
Assert.fail();
}
catch ( NSQuotaExceededException e) {
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// setPermission on f1 (created before the last snapshot) needs diff
// storage, so it fails too — surfaced as a wrapped RemoteException.
try {
hdfs.setPermission(f1,new FsPermission((short)0));
Assert.fail();
}
catch ( RemoteException e) {
Assert.assertSame(NSQuotaExceededException.class,e.unwrapRemoteException().getClass());
SnapshotTestHelper.LOG.info("The exception is expected.",e);
}
// f2 was created after the snapshot, so changing it needs no extra quota.
hdfs.setPermission(f2,new FsPermission((short)0));
// Raising the quota makes snapshot creation and further changes succeed.
hdfs.setQuota(dir,NS_QUOTA + 2,HdfsConstants.QUOTA_DONT_SET);
hdfs.createSnapshot(dir,"s1");
hdfs.setPermission(foo,new FsPermission((short)0444));
}
InternalCallVerifier BooleanVerifier
/**
* When we have nested snapshottable directories and if we try to reset the
* snapshottable descendant back to an regular directory, we need to replace
* the snapshottable descendant with an INodeDirectoryWithSnapshot
*/
@Test public void testDisallowNestedSnapshottableDir() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
final Path dir=new Path("/dir");
final Path sub=new Path(dir,"sub");
hdfs.mkdirs(sub);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
final Path file=new Path(sub,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,SEED);
FSDirectory fsdir=cluster.getNamesystem().getFSDirectory();
INode subNode=fsdir.getINode(sub.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
hdfs.allowSnapshot(sub);
subNode=fsdir.getINode(sub.toString());
assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
hdfs.disallowSnapshot(sub);
subNode=fsdir.getINode(sub.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercise {@link Snapshot#ID_COMPARATOR}: null/null compare equal, null
 * sorts after every real snapshot, and for these fixtures the id ordering
 * must agree (in sign) with ordering by the snapshot root's local name.
 */
@Test(timeout=300000) public void testIdCmp(){
final PermissionStatus permStatus=PermissionStatus.createImmutable("user","group",FsPermission.createImmutable((short)0));
final INodeDirectory snapshotRoot=new INodeDirectory(0,DFSUtil.string2Bytes("foo"),permStatus,0L);
snapshotRoot.addSnapshottableFeature();
// duplicate id/name pairs on purpose so the equality branch is exercised
final Snapshot[] fixtures={new Snapshot(1,"s1",snapshotRoot),new Snapshot(1,"s1",snapshotRoot),new Snapshot(2,"s2",snapshotRoot),new Snapshot(2,"s2",snapshotRoot)};
Assert.assertEquals(0,Snapshot.ID_COMPARATOR.compare(null,null));
for ( Snapshot left : fixtures) {
// null compares greater than any real snapshot, and vice versa
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null,left) > 0);
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(left,null) < 0);
for ( Snapshot right : fixtures) {
final int byName=left.getRoot().getLocalName().compareTo(right.getRoot().getLocalName());
final int byId=Snapshot.ID_COMPARATOR.compare(left,right);
// both orderings must agree in sign for every pair
Assert.assertEquals(byName > 0,byId > 0);
Assert.assertEquals(byName == 0,byId == 0);
Assert.assertEquals(byName < 0,byId < 0);
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Rename interactions between a snapshottable and a non-snapshottable
 * directory: renaming the snapshottable root itself (which already has
 * snapshots) must fail, while renaming a child out of it must leave a
 * WithName reference in the snapshot and a second reference at the rename
 * destination, both sharing one WithCount node.
 */
@Test(timeout=300000) public void testRenameFromSDir2NonSDir() throws Exception {
final String dirStr="/testRenameWithSnapshot";
final String abcStr=dirStr + "/abc";
final Path abc=new Path(abcStr);
hdfs.mkdirs(abc,new FsPermission((short)0777));
hdfs.allowSnapshot(abc);
final Path foo=new Path(abc,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(abc,"s0");
// renaming a snapshottable directory that already has snapshots is rejected
try {
hdfs.rename(abc,new Path(dirStr,"tmp"));
fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots",e);
}
final String xyzStr=dirStr + "/xyz";
final Path xyz=new Path(xyzStr);
hdfs.mkdirs(xyz,new FsPermission((short)0777));
final Path bar=new Path(xyz,"bar");
// moving a child out of the snapshottable directory is allowed ...
hdfs.rename(foo,bar);
// ... and the copy recorded in snapshot s0 becomes a WithName reference
final INode fooRef=fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc,"s0","foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
final INodeReference.WithCount withCount=(INodeReference.WithCount)fooRef.asReference().getReferredINode();
// two references: the snapshot copy plus the rename destination
Assert.assertEquals(2,withCount.getReferenceCount());
final INode barRef=fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount,barRef.asReference().getReferredINode());
// deleting the destination drops its reference; the snapshot copy remains
hdfs.delete(bar,false);
Assert.assertEquals(1,withCount.getReferenceCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Make sure we clean the whole subtree under a DstReference node after
 * deleting a snapshot.
 * see HDFS-5476.
 */
@Test public void testCleanDstReference() throws Exception {
final Path test=new Path("/test");
final Path foo=new Path(test,"foo");
final Path bar=new Path(foo,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
final Path fileInBar=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,fileInBar,BLOCKSIZE,REPL,SEED);
// rename foo after s0, then snapshot again so foo2 is held via a DstReference
final Path foo2=new Path(test,"foo2");
hdfs.rename(foo,foo2);
hdfs.createSnapshot(test,"s1");
hdfs.delete(new Path(foo2,"bar"),true);
hdfs.delete(foo2,true);
// the deleted file must still be reachable through snapshot s1 ...
final Path sfileInBar=SnapshotTestHelper.getSnapshotPath(test,"s1","foo2/bar/file");
assertTrue(hdfs.exists(sfileInBar));
// ... and deleting s1 must clean the whole subtree under the DstReference
hdfs.deleteSnapshot(test,"s1");
assertFalse(hdfs.exists(sfileInBar));
restartClusterAndCheckImage(true);
// bar as captured in s0 must be empty and carry a single, empty diff
final Path barInS0=SnapshotTestHelper.getSnapshotPath(test,"s0","foo/bar");
INodeDirectory barNode=fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0,barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List diffList=barNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertEquals(0,diff.getChildrenDiff().getList(ListType.DELETED).size());
assertEquals(0,diff.getChildrenDiff().getList(ListType.CREATED).size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir before taking the snapshot.
 */
@Test public void testRenameUndo_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
// force the rename to fail: the spied destination directory rejects addChild
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
// after the undo, dir1 must still contain foo with an unchanged s1 diff
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
// the undone rename must not leave create/delete entries in the s1 diff
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// foo in snapshot s1 is the very same inode as the live foo (no reference)
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
INode fooNode_s1=fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
assertFalse(hdfs.exists(newfoo));
// dir2 must be untouched: no snapshot feature, only its original file
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Unit test for HDFS-4842.
 * Sequence: create file -> snapshots s0/s1 on /test -> delete file ->
 * snapshot s2 on dir2 -> rename foo into dir1 -> delete s1.  Verifies the
 * file's visibility per snapshot and the diff/reference bookkeeping after
 * the rename across nested snapshottable directories.
 */
@Test public void testRenameDirAndDeleteSnapshot_7() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
SnapshotTestHelper.createSnapshot(hdfs,test,"s1");
// delete the file after s0/s1, then snapshot dir2 and move foo into dir1
hdfs.delete(file,true);
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
hdfs.deleteSnapshot(test,"s1");
// the file was deleted before s2 was taken, so it is absent from s2 ...
final Path file_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2","foo/bar/file");
assertFalse(hdfs.exists(file_s2));
// ... but still present in s0, which predates the delete
final Path file_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo/bar/file");
assertTrue(hdfs.exists(file_s0));
// dir1's single diff records the renamed-in foo as a created child
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List dir1DiffList=dir1Node.getDiffs().asList();
assertEquals(1,dir1DiffList.size());
List dList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertTrue(dList.isEmpty());
List cList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,cList.size());
INode cNode=cList.get(0);
INode fooNode=fsdir.getINode4Write(newfoo.toString());
assertSame(cNode,fooNode);
// bar keeps one diff, associated with s0 and holding the deleted file
final Path newbar=new Path(newfoo,bar.getName());
INodeDirectory barNode=fsdir.getINode4Write(newbar.toString()).asDirectory();
assertSame(fooNode.asDirectory(),barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
DirectoryDiff diff=barDiffList.get(0);
INodeDirectory testNode=fsdir.getINode4Write(test.toString()).asDirectory();
Snapshot s0=testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(),diff.getSnapshotId());
assertEquals("file",diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName());
// dir2's diff records foo as deleted; its s2 copy is a WithName reference
// sharing the referred inode with the live (renamed) foo
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
List dir2DiffList=dir2Node.getDiffs().asList();
assertEquals(1,dir2DiffList.size());
dList=dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1,dList.size());
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2",foo.getName());
INodeReference.WithName fooNode_s2=(INodeReference.WithName)fsdir.getINode(foo_s2.toString());
assertSame(dList.get(0),fooNode_s2);
assertSame(fooNode.asReference().getReferredINode(),fooNode_s2.getReferredINode());
restartClusterAndCheckImage(true);
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file and then append the same file.
 * Verifies the renamed file stays a DstReference while under construction
 * and after the append stream is closed.
 */
@Test public void testRenameAndAppend() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir1,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,snap1);
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
// the renamed file is represented by a DstReference at its new location
INode fooRef=fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
FSDataOutputStream out=hdfs.append(foo2);
try {
byte[] content=new byte[1024];
(new Random()).nextBytes(content);
out.write(content);
// while the append is in flight the inode must stay a DstReference and
// be flagged as under construction
fooRef=fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
INodeFile fooNode=fooRef.asFile();
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.isUnderConstruction());
}
finally {
if (out != null) {
out.close();
}
}
// after close: still a DstReference, no longer under construction
fooRef=fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
INodeFile fooNode=fooRef.asFile();
assertTrue(fooNode.isWithSnapshot());
assertFalse(fooNode.isUnderConstruction());
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of rename. Before the rename, we create the renamed
 * file/dir after taking the snapshot.
 */
@Test public void testRenameUndo_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file=new Path(sdir2,"file");
DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
// foo is created AFTER s1, so it only exists in the current state
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
// force the rename to fail: the spied destination directory rejects addChild
INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2=spy(dir2);
doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2,mockDir2,fsdir.getINodeMap());
final Path newfoo=new Path(sdir2,"foo");
boolean result=hdfs.rename(foo,newfoo);
assertFalse(result);
// after the undo, dir1 must still contain foo
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir1Children.size());
assertEquals(foo.getName(),dir1Children.get(0).getLocalName());
List dir1Diffs=dir1Node.getDiffs().asList();
assertEquals(1,dir1Diffs.size());
assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId());
// foo was created after s1, so the s1 diff records it as CREATED only
ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
INode fooNode=fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
// foo must not appear in snapshot s1 nor at the failed destination
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
assertFalse(hdfs.exists(foo_s1));
assertFalse(hdfs.exists(newfoo));
// dir2 must be untouched: no snapshot feature, only its original file
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rename a dir and a file multiple times across snapshottable
 * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Only create snapshots in the beginning (before the rename).
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar2_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2_dir1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// 1. rename foo and bar from dir1 into dir2, then lower the replication
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar2_dir2=new Path(sdir2,"bar");
hdfs.rename(bar2_dir1,bar2_dir2);
restartClusterAndCheckImage(true);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar2_dir2,REPL_1);
// s1 (taken before the rename) must keep the files at the old replication;
// s2 must not contain them at all since the rename happened after s2
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar2_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar1_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar1");
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL_1,statusBar1.getReplication());
FileStatus statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL_1,statusBar2.getReplication());
// 2. rename on from dir2 into dir3 and lower the replication again
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar2_dir3=new Path(sdir3,"bar");
hdfs.rename(bar2_dir2,bar2_dir3);
restartClusterAndCheckImage(true);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar2_dir3,REPL_2);
// only s1 still sees the files (with the original replication)
final Path bar1_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","foo/bar1");
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir3);
assertEquals(REPL_2,statusBar2.getReplication());
// 3. rename back from dir3 to dir2 and restore the replication
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar2_dir3,bar2_dir2);
restartClusterAndCheckImage(true);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar2_dir2,REPL);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
statusBar2=hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL,statusBar2.getReplication());
// 4. rename back from dir2 to dir1; verify the reference counts and diffs:
// each live inode plus its s1 copy share one WithCount (count == 2)
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar2_dir2,bar2_dir1);
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(2,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
assertEquals(1,foo.getDiffs().asList().size());
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),foo.getDirectoryWithSnapshotFeature().getLastSnapshotId());
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1,bar1.getDiffs().asList().size());
assertEquals(s1.getId(),bar1.getDiffs().getLastSnapshotId());
INodeReference barRef=fsdir.getINode4Write(bar2_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(2,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
assertEquals(1,bar.getDiffs().asList().size());
assertEquals(s1.getId(),bar.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
// 5. delete everything from the current state; only the s1 copies survive
// and each WithCount drops to a single (snapshot) reference
hdfs.delete(foo_dir1,true);
hdfs.delete(bar2_dir1,true);
restartClusterAndCheckImage(true);
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
assertFalse(hdfs.exists(foo_dir1));
assertFalse(hdfs.exists(bar1_dir1));
assertFalse(hdfs.exists(bar2_dir1));
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar2=hdfs.getFileStatus(bar2_s1);
assertEquals(REPL,statusBar2.getReplication());
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo");
fooRef=fsdir.getINode(foo_s1.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWithCount.getReferenceCount());
barRef=fsdir.getINode(bar2_s1.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(1,barWithCount.getReferenceCount());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the undo section of the second-time rename.
 */
@Test public void testRenameUndo_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// make dir3 reject addChild so the second rename fails and is undone
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt());
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
final Path foo_dir2=new Path(sdir2,"foo2");
final Path foo_dir3=new Path(sdir3,"foo3");
// first rename succeeds; the second must fail and be rolled back
hdfs.rename(foo,foo_dir2);
boolean result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
// dir2 must still hold foo2, recorded as CREATED in its s2 diff
INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
List dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(1,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
// foo2 arrived after s2, so it must not be visible in that snapshot
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo2");
assertFalse(hdfs.exists(foo_s2));
INode fooNode=fsdir.getINode4Write(foo_dir2.toString());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
assertTrue(fooNode instanceof INodeReference.DstReference);
List fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(1,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// take s3 and retry the failing rename: the undo must now also restore
// the state captured by s3
hdfs.createSnapshot(sdir2,"s3");
result=hdfs.rename(foo_dir2,foo_dir3);
assertFalse(result);
dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s3=dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode=fsdir.getINode4Write(foo_dir2.toString());
dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,dir2Children.size());
dir2Diffs=dir2Node.getDiffs().asList();
assertEquals(2,dir2Diffs.size());
assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId());
assertEquals(s3.getId(),dir2Diffs.get(1).getSnapshotId());
// s2 diff still records foo2 as CREATED; the s3 diff must stay empty
childrenDiff=dir2Diffs.get(0).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(1,childrenDiff.getList(ListType.CREATED).size());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
childrenDiff=dir2Diffs.get(1).getChildrenDiff();
assertEquals(0,childrenDiff.getList(ListType.DELETED).size());
assertEquals(0,childrenDiff.getList(ListType.CREATED).size());
// foo2 exists in s3 (taken while it was present) but still not in s2
final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo2");
assertFalse(hdfs.exists(foo_s2));
assertTrue(hdfs.exists(foo_s3));
assertTrue(fooNode instanceof INodeReference.DstReference);
fooDiffs=fooNode.asDirectory().getDiffs().asList();
assertEquals(2,fooDiffs.size());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
assertEquals(s3.getId(),fooDiffs.get(1).getSnapshotId());
}
UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test undo where dst node being overwritten is a reference node
 */
@Test public void testRenameUndo_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
final Path foo2=new Path(sdir2,"foo2");
hdfs.mkdirs(foo2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// rename foo2 into dir3 so the future overwrite target is a reference node
final Path foo3=new Path(sdir3,"foo3");
hdfs.rename(foo2,foo3);
INode foo3Node=fsdir.getINode4Write(foo3.toString());
assertTrue(foo3Node.isReference());
// spy dir3: first addChild attempt fails (forcing the undo), subsequent
// calls fall through to the real method
INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3=spy(dir3);
doReturn(false).when(mockDir3).addChild((INode)Mockito.isNull(),anyBoolean(),Mockito.anyInt());
Mockito.when(mockDir3.addChild((INode)Mockito.isNotNull(),anyBoolean(),Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
INodeDirectory root=fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3,mockDir3,fsdir.getINodeMap());
foo3Node.setParent(mockDir3);
try {
hdfs.rename(foo,foo3,Rename.OVERWRITE);
fail("the rename from " + foo + " to "+ foo3+ " should fail");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("rename from " + foo + " to "+ foo3+ " failed.",e);
}
// after the undo, the overwritten reference node must be fully restored:
// same inode, same reference count, and still the parent reference
final INode foo3Node_undo=fsdir.getINode4Write(foo3.toString());
assertSame(foo3Node,foo3Node_undo);
INodeReference.WithCount foo3_wc=(WithCount)foo3Node.asReference().getReferredINode();
assertEquals(2,foo3_wc.getReferenceCount());
assertSame(foo3Node,foo3_wc.getParentReference());
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file that was created after the snapshot, so the file has no
 * copy in the snapshot.  The diff report should show only the directory
 * modification and the creation of the rename target.
 */
@Test(timeout=60000) public void testRenameFileNotInSnapshot() throws Exception {
hdfs.mkdirs(sub1);
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1,snap1);
// the file is created after snap1, so it never existed in the snapshot
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPL,SEED);
hdfs.rename(file1,file2);
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(sub1,snap1,"");
List diffEntries=report.getDiffList();
// expect exactly: MODIFY of the dir itself and CREATE of the new name
assertTrue(2 == diffEntries.size());
assertTrue(existsInDiffReport(diffEntries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(diffEntries,DiffType.CREATE,file2.getName(),null));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
 * again -> delete snapshot s on dst tree
 * Make sure we only delete the snapshot s under the renamed dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// move foo into dir2, grow it there, snapshot s3, then move it back
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
hdfs.rename(foo2,foo);
// deleting s3 must only remove state under the renamed dir
hdfs.deleteSnapshot(sdir2,"s3");
// namespace counts after the round trip — NOTE(review): expected values
// depend on the snapshot-copy accounting; confirm the breakdown if changed
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(9,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// foo's s1 copy is a WithName reference sharing a WithCount with live foo
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
final INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(2,wc.getReferenceCount());
// the referred directory holds all three children created along the way
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
assertEquals(bar2.getName(),children.get(1).getLocalName());
assertEquals(bar3.getName(),children.get(2).getLocalName());
// only the s1 diff remains: bar2/bar3 (created after s1) show as CREATED
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(2,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
// the live foo is a DstReference pointing at the same WithCount
final INode fooRef2=fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2=(WithCount)fooRef2.asReference().getReferredINode();
assertSame(wc,wc2);
assertSame(fooRef2,wc.getParentReference());
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test rename a dir multiple times across snapshottable directories:
 * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
 * Create snapshots after each rename.
 * <p>
 * The replication of the files under foo is changed between renames so that
 * each snapshot round captures a distinct file state. The test then verifies
 * that every snapshot path reports the replication that was current when the
 * snapshot was taken, that the reference counts on the renamed inodes are as
 * expected, and that the diff lists are ordered by snapshot id.
 */
@Test public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
// Three snapshottable roots. foo/bar1 (a file) and bar (a file) start in dir1.
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path sdir3=new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1=new Path(sdir1,"foo");
final Path bar1_dir1=new Path(foo_dir1,"bar1");
final Path bar_dir1=new Path(sdir1,"bar");
DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar_dir1,BLOCKSIZE,REPL,SEED);
// Snapshot round 1 (s1/s2/s3): captures foo and bar in dir1 at replication REPL.
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3");
// First rename: dir1 -> dir2, then lower the replication to REPL_1.
final Path foo_dir2=new Path(sdir2,"foo");
hdfs.rename(foo_dir1,foo_dir2);
final Path bar_dir2=new Path(sdir2,"bar");
hdfs.rename(bar_dir1,bar_dir2);
final Path bar1_dir2=new Path(foo_dir2,"bar1");
hdfs.setReplication(bar1_dir2,REPL_1);
hdfs.setReplication(bar_dir2,REPL_1);
// Sanity-check persistence across a saveNamespace/restart cycle.
restartClusterAndCheckImage(true);
// Snapshot round 2 (s11/s22/s33): s22 captures the files in dir2 at REPL_1.
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s11");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s22");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s33");
// Second rename: dir2 -> dir3, then change the replication to REPL_2.
final Path foo_dir3=new Path(sdir3,"foo");
hdfs.rename(foo_dir2,foo_dir3);
final Path bar_dir3=new Path(sdir3,"bar");
hdfs.rename(bar_dir2,bar_dir3);
final Path bar1_dir3=new Path(foo_dir3,"bar1");
hdfs.setReplication(bar1_dir3,REPL_2);
hdfs.setReplication(bar_dir3,REPL_2);
restartClusterAndCheckImage(true);
// Snapshot round 3 (s111/s222/s333): s333 captures the files in dir3 at REPL_2.
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s222");
SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s333");
// Snapshot paths where each round should have captured the files.
final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1");
final Path bar1_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","foo/bar1");
final Path bar1_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","foo/bar1");
final Path bar_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar");
final Path bar_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","bar");
final Path bar_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
// Each snapshot copy must report the replication in effect when it was taken.
FileStatus statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
FileStatus statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir3);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
// Rename back: dir3 -> dir2, restore replication to REPL.
hdfs.rename(foo_dir3,foo_dir2);
hdfs.rename(bar_dir3,bar_dir2);
hdfs.setReplication(bar1_dir2,REPL);
hdfs.setReplication(bar_dir2,REPL);
restartClusterAndCheckImage(true);
// Snapshot round 4 (s1111/s2222). Note s1111 is taken while foo/bar live in
// dir2, so dir1's s1111 contains neither of them (verified further below).
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1111");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2222");
final Path bar1_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo/bar1");
final Path bar_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
// Re-check all replications, including the new s2222 copies taken at REPL.
statusBar1=hdfs.getFileStatus(bar1_s1);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2,statusBar1.getReplication());
statusBar1=hdfs.getFileStatus(bar1_s2222);
assertEquals(REPL,statusBar1.getReplication());
statusBar=hdfs.getFileStatus(bar_s1);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_dir2);
assertEquals(REPL,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2,statusBar.getReplication());
statusBar=hdfs.getFileStatus(bar_s2222);
assertEquals(REPL,statusBar.getReplication());
// Final rename back to the original location under dir1.
hdfs.rename(foo_dir2,foo_dir1);
hdfs.rename(bar_dir2,bar_dir1);
INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory();
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
INodeDirectory sdir3Node=fsdir.getINode(sdir3.toString()).asDirectory();
// foo now resolves to a reference with 5 references on its WithCount node
// (presumably the current reference plus the WithName nodes kept by the
// snapshots that captured foo — TODO confirm against INodeReference docs).
INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference();
INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(5,fooWithCount.getReferenceCount());
INodeDirectory foo=fooWithCount.asDirectory();
List fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
// Diffs on foo must be ordered by snapshot id: s1 < s22 < s333 < s2222.
Snapshot s2222=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333=sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId());
// bar1 is a plain file under foo; it accumulated one diff per replication
// change that happened while a snapshot covered it.
INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(),bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(),bar1Diffs.get(0).getSnapshotId());
// bar was renamed the same number of times and mirrors foo's counts.
INodeReference barRef=fsdir.getINode4Write(bar_dir1.toString()).asReference();
INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(5,barWithCount.getReferenceCount());
INodeFile bar=barWithCount.asFile();
List barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(),barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(),barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(),barDiffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Delete the current files; snapshot copies must survive the deletion.
hdfs.delete(foo_dir1,true);
hdfs.delete(bar_dir1,true);
restartClusterAndCheckImage(true);
final Path bar1_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","foo/bar1");
final Path bar_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
// s1111 was taken while foo/bar were in dir2, so dir1's s1111 has no copies.
assertFalse(hdfs.exists(bar1_s1111));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
assertFalse(hdfs.exists(bar_s1111));
// After the deletion the reference counts drop from 5 to 4; the latest diff
// recorded on the snapshot copies is still s2222's.
final Path foo_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo");
fooRef=fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount=(WithCount)fooRef.getReferredINode();
assertEquals(4,fooWithCount.getReferenceCount());
foo=fooWithCount.asDirectory();
fooDiffs=foo.getDiffs().asList();
assertEquals(4,fooDiffs.size());
assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId());
bar1Diffs=bar1.getDiffs().asList();
assertEquals(3,bar1Diffs.size());
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId());
barRef=fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount=(WithCount)barRef.getReferredINode();
assertEquals(4,barWithCount.getReferenceCount());
bar=barWithCount.asFile();
barDiffs=bar.getDiffs().asList();
assertEquals(4,barDiffs.size());
assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 * <p>
 * The namespace quota on dir2 is set low enough that moving foo into
 * dir2/subdir2 must fail. The rename is expected to return false (not throw),
 * and the test then verifies the undo: the source tree is fully restored and
 * neither snapshottable directory has any created/deleted entries recorded in
 * its snapshot diff.
 */
@Test public void testRenameUndo_5() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path subdir2=new Path(dir2,"subdir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subdir2);
final Path foo=new Path(dir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Namespace quota of 5 on dir2: the rename below would exceed it.
hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
final Path foo2=new Path(subdir2,foo.getName());
boolean rename=hdfs.rename(foo,foo2);
// The quota violation makes rename report failure rather than throw.
assertFalse(rename);
// Undo check: the source files must still exist at their original paths.
assertTrue(hdfs.exists(foo));
assertTrue(hdfs.exists(bar));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
// foo gained the with-snapshot feature from the attempted rename.
assertTrue(fooNode.asDirectory().isWithSnapshot());
INode barNode=fsdir.getINode4Write(bar.toString());
// bar must still be a plain INodeFile (no reference node was left behind).
assertTrue(barNode.getClass() == INodeFile.class);
assertSame(fooNode,barNode.getParent());
// dir1's single diff (from s1) must record no child changes.
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination tree: quota usage is unchanged (dir2 + subdir2 + snapshot root
// accounting = 3 namespace, 0 diskspace).
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(3,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString()));
// dir2's single diff (from s2) must likewise record no child changes.
diffList=dir2Node.getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Rename foo into sdir2, then create bar2/bar3 under the renamed dir so
// that snapshot s3 (on the dst tree) is the only thing keeping them alive.
final Path foo2=new Path(sdir2,"foo");
hdfs.rename(foo,foo2);
final Path bar2=new Path(foo2,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
final Path bar3=new Path(foo2,"bar3");
DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir2,"s3");
// Delete the renamed dir, then drop s3: bar2/bar3 must now be destroyed.
hdfs.delete(foo2,true);
hdfs.deleteSnapshot(sdir2,"s3");
// Quota check: dir1 still accounts for the s1 copy of foo/bar (4 namespace);
// dir2 is back to just itself plus its snapshot root (2 namespace).
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory();
Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q1.get(Quota.NAMESPACE));
final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory();
Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(2,q2.get(Quota.NAMESPACE));
// The s1 copy of foo survives as a WithName reference with a single ref.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName());
INode fooRef=fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode();
assertEquals(1,wc.getReferenceCount());
// Only bar (created before the rename) remains under foo; bar2/bar3 are gone.
INodeDirectory fooNode=wc.getReferredINode().asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
assertEquals(bar.getName(),children.get(0).getLocalName());
// foo keeps exactly one diff, belonging to s1, with no child changes.
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
ChildrenDiff diff=diffList.get(0).getChildrenDiff();
assertEquals(0,diff.getList(ListType.CREATED).size());
assertEquals(0,diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Rename and deletion snapshot under the same the snapshottable directory.
 * <p>
 * Delete a file captured by snapshot s0, rename its grandparent dir within
 * the same snapshottable root, then delete s0. All diff lists touched by the
 * rename/delete must end up empty once the only snapshot is gone.
 */
@Test public void testRenameDirAndDeleteSnapshot_6() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo=new Path(dir2,"foo");
final Path bar=new Path(foo,"bar");
final Path file=new Path(bar,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED);
// s0 covers the whole /test tree, including dir2/foo/bar/file.
SnapshotTestHelper.createSnapshot(hdfs,test,"s0");
// Delete the file (kept alive by s0), then move foo from dir2 to dir1.
hdfs.delete(file,true);
final Path newfoo=new Path(dir1,foo.getName());
hdfs.rename(foo,newfoo);
final Path foo_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo");
assertTrue("the snapshot path " + foo_s0 + " should exist",hdfs.exists(foo_s0));
// Dropping s0 must remove the snapshot copy and clear all related diffs.
hdfs.deleteSnapshot(test,"s0");
assertFalse("after deleting s0, " + foo_s0 + " should not exist",hdfs.exists(foo_s0));
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue("the diff list of " + dir2 + " should be empty after deleting s0",dir2Node.getDiffs().asList().isEmpty());
assertTrue(hdfs.exists(newfoo));
// The renamed foo is still a DstReference, but with empty diff lists down
// the whole subtree (foo, bar) now that no snapshot refers to it.
INode fooRefNode=fsdir.getINode4Write(newfoo.toString());
assertTrue(fooRefNode instanceof INodeReference.DstReference);
INodeDirectory fooNode=fooRefNode.asDirectory();
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.getDiffs().asList().isEmpty());
INodeDirectory barNode=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0).asDirectory();
assertTrue(barNode.getDiffs().asList().isEmpty());
assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
restartClusterAndCheckImage(true);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the rename undo when quota of dst tree is exceeded after rename.
 * <p>
 * An OVERWRITE rename moves foo onto an existing file in dir2 while dir2 has
 * a tight namespace quota. The test only checks the resulting quota usage of
 * dir2 (7 namespace entries, two file blocks of diskspace); whether the
 * rename itself succeeds or is undone is not asserted here.
 */
@Test public void testRenameExceedQuota() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path sub_dir2=new Path(dir2,"subdir");
final Path subfile_dir2=new Path(sub_dir2,"subfile");
hdfs.mkdirs(dir1);
DFSTestUtil.createFile(hdfs,subfile_dir2,BLOCKSIZE,REPL,SEED);
final Path foo=new Path(dir1,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Namespace quota of 5 on dir2 before the overwrite-rename below.
hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
hdfs.rename(foo,subfile_dir2,Rename.OVERWRITE);
INode dir2Node=fsdir.getINode4Write(dir2.toString());
assertTrue(dir2Node.asDirectory().isSnapshottable());
// The overwritten subfile is retained by snapshot s2, so both files count
// toward dir2's usage: 7 namespace entries and 2 blocks of diskspace.
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(7,counts.get(Quota.NAMESPACE));
assertEquals(BLOCKSIZE * REPL * 2,counts.get(Quota.DISKSPACE));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test the rename undo when removing dst node fails
 * <p>
 * An OVERWRITE rename of foo onto dir2/subdir/subdir must fail with a
 * QuotaExceededException (dir2's namespace quota is 4). The test then
 * verifies that both the source and destination trees were restored exactly,
 * and that no created/deleted entries leaked into any snapshot diff.
 */
@Test public void testRenameUndo_6() throws Exception {
final Path test=new Path("/test");
final Path dir1=new Path(test,"dir1");
final Path dir2=new Path(test,"dir2");
final Path sub_dir2=new Path(dir2,"subdir");
final Path subsub_dir2=new Path(sub_dir2,"subdir");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subsub_dir2);
final Path foo=new Path(dir1,"foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Quota 4 on dir2 makes the snapshot-record step of the rename fail.
hdfs.setQuota(dir2,4,Long.MAX_VALUE - 1);
try {
hdfs.rename(foo,subsub_dir2,Rename.OVERWRITE);
fail("Expect QuotaExceedException");
}
 catch ( QuotaExceededException e) {
String msg="Failed to record modification for snapshot: " + "The NameSpace quota (directories and files)" + " is exceeded: quota=4 file count=5";
GenericTestUtils.assertExceptionContains(msg,e);
}
// Undo check: foo is still present under dir1 and is dir1's only child.
assertTrue(hdfs.exists(foo));
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory();
List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode fooNode=childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
assertSame(dir1Node,fooNode.getParent());
// dir1's single diff (from s1) must record no child changes.
List diffList=dir1Node.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination tree: quota usage unchanged at 4 namespace, 0 diskspace.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
Quota.Counts counts=dir2Node.computeQuotaUsage();
assertEquals(4,counts.get(Quota.NAMESPACE));
assertEquals(0,counts.get(Quota.DISKSPACE));
childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1,childrenList.size());
INode subdir2Node=childrenList.get(0);
assertTrue(subdir2Node.asDirectory().isWithSnapshot());
assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(sub_dir2.toString()));
// The overwrite target subdir/subdir survives as a plain INodeDirectory.
INode subsubdir2Node=fsdir.getINode4Write(subsub_dir2.toString());
assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
assertSame(subdir2Node,subsubdir2Node.getParent());
// dir2's single diff (from s2) must record no child changes either, and
// subdir itself must have accumulated no diffs at all.
diffList=(dir2Node).getDiffs().asList();
assertEquals(1,diffList.size());
diff=diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
diffList=subdir2Node.asDirectory().getDiffs().asList();
assertEquals(0,diffList.size());
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file inside a subdirectory of a snapshotted directory, then check
 * that the snapshot diff report shows the subdirectory as MODIFY and the
 * file move as a RENAME entry with the correct relative src/dst paths.
 */
@Test(timeout=60000) public void testRenameFileInSubDirOfDirWithSnapshot() throws Exception {
final Path sub2=new Path(sub1,"sub2");
final Path sub2file1=new Path(sub2,"sub2file1");
final Path sub2file2=new Path(sub2,"sub2file2");
final String sub1snap1="sub1snap1";
hdfs.mkdirs(sub1);
hdfs.mkdirs(sub2);
DFSTestUtil.createFile(hdfs,sub2file1,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sub1,sub1snap1);
// Rename within sub2 after the snapshot was taken.
hdfs.rename(sub2file1,sub2file2);
// Diff between the snapshot and the current state ("").
SnapshotDiffReport diffReport=hdfs.getSnapshotDiffReport(sub1,sub1snap1,"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List entries=diffReport.getDiffList();
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,sub2.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,sub2.getName() + "/" + sub2file1.getName(),sub2.getName() + "/" + sub2file2.getName()));
}
InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * This test demonstrates that {@link INodeDirectory#removeChild(INode,Snapshot)}and {@link INodeDirectory#addChild(INode,boolean,Snapshot)}should use {@link INode#isInLatestSnapshot(Snapshot)} to check if the
 * added/removed child should be recorded in snapshots.
 * <p>
 * bar is created after dir1's snapshot s1 is deleted, so after the renames
 * and the deletion of foo2, bar must survive at its new location as an
 * ordinary file whose parent is dir3 (no stale snapshot record may claim it).
 */
@Test public void testRenameDirAndDeleteSnapshot_5() throws Exception {
final Path dir1=new Path("/dir1");
final Path dir2=new Path("/dir2");
final Path dir3=new Path("/dir3");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
hdfs.mkdirs(dir3);
final Path foo=new Path(dir1,"foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1");
// bar is created while s1 exists, but s1 is deleted right after, so bar is
// not covered by any snapshot of dir1.
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
hdfs.deleteSnapshot(dir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2");
// Move foo into dir2 (covered by s2), then move bar out into dir3 and
// delete the remaining foo2 tree.
final Path foo2=new Path(dir2,foo.getName());
hdfs.rename(foo,foo2);
final Path bar2=new Path(dir2,"foo/bar");
final Path bar3=new Path(dir3,"bar");
hdfs.rename(bar2,bar3);
hdfs.delete(foo2,true);
// bar must live on under dir3 with dir3 as its parent inode.
assertTrue(hdfs.exists(bar3));
INodeFile barNode=(INodeFile)fsdir.getINode4Write(bar3.toString());
assertSame(fsdir.getINode4Write(dir3.toString()),barNode.getParent());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test rename where the src/dst directories are both snapshottable
 * directories without snapshots. In such case we need to update the
 * snapshottable dir list in SnapshotManager.
 * <p>
 * While the destination still holds a snapshot the OVERWRITE rename must be
 * rejected; after the snapshot is deleted the rename succeeds and exactly one
 * snapshottable dir (the moved foo, now at bar's path) remains listed.
 */
@Test(timeout=60000) public void testRenameAndUpdateSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
final Path foo=new Path(sdir1,"foo");
final Path bar=new Path(sdir2,"bar");
hdfs.mkdirs(foo);
hdfs.mkdirs(bar);
// foo is snapshottable without snapshots; bar is snapshottable WITH snap1.
hdfs.allowSnapshot(foo);
SnapshotTestHelper.createSnapshot(hdfs,bar,snap1);
assertEquals(2,fsn.getSnapshottableDirListing().length);
// Remember foo's inode id so we can verify identity after the rename.
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
long fooId=fooNode.getId();
try {
hdfs.rename(foo,bar,Rename.OVERWRITE);
fail("Expect exception since " + bar + " is snapshottable and already has snapshots");
}
 catch ( IOException e) {
GenericTestUtils.assertExceptionContains(bar.toString() + " is snapshottable and already has snapshots",e);
}
// Once bar has no snapshots the overwrite is allowed.
hdfs.deleteSnapshot(bar,snap1);
hdfs.rename(foo,bar,Rename.OVERWRITE);
// The snapshottable-dir list must now contain only foo's inode, listed
// under its new full path (bar).
SnapshottableDirectoryStatus[] dirs=fsn.getSnapshottableDirListing();
assertEquals(1,dirs.length);
assertEquals(bar,dirs[0].getFullPath());
assertEquals(fooId,dirs[0].getDirStatus().getFileId());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test rename to an invalid name (xxx/.snapshot)
 * <p>
 * Renaming bar to foo/.snapshot must be rejected because ".snapshot" is a
 * reserved name. The test verifies the failed rename left the namespace and
 * the snapshot diff records untouched, then restarts the cluster to confirm
 * the state also survives a saveNamespace/reload cycle.
 */
@Test public void testRenameUndo_7() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,root,snap1);
// ".snapshot" is reserved; using it as a rename target must throw.
final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR);
try {
hdfs.rename(bar,invalid);
fail("expect exception since invalid name is used for rename");
}
 catch ( Exception e) {
GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e);
}
// Undo check: foo still has its single child and a single, empty diff
// belonging to snap1.
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1,children.size());
List diffList=fooNode.getDiffs().asList();
assertEquals(1,diffList.size());
DirectoryDiff diff=diffList.get(0);
Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(),diff.getSnapshotId());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
// bar is still foo's child and keeps exactly one file diff from snap1.
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode,children.get(0));
assertSame(fooNode,barNode.getParent());
List barDiffList=barNode.getDiffs().asList();
assertEquals(1,barDiffList.size());
FileDiff barDiff=barDiffList.get(0);
assertEquals(s1.getId(),barDiff.getSnapshotId());
// Persist the namespace and restart the cluster from the saved image.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build();
cluster.waitActive();
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rename a single file across snapshottable dirs.
 * <p>
 * foo (in sdir2, captured by s2) is moved to sdir1 after sdir1's snapshots
 * s1/s3 were taken. The s2 copy must keep the pre-rename replication, sdir1's
 * s3 must not contain foo, and the moved file's last recorded snapshot diff
 * must belong to s2.
 */
@Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Move foo into sdir1 and change its replication after the move.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
hdfs.setReplication(newfoo,REPL_1);
// The s2 copy must still exist and report the original replication.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
FileStatus status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// s3 was taken before foo arrived in sdir1, so it must not contain foo.
final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
// The moved file's latest diff must be the one recorded for s2.
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
INodeFile sfoo=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(s2.getId(),sfoo.getDiffs().getLastSnapshotId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After the following steps:
 *
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 *
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000) public void testRenameDirAcrossSnapshottableDirs() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
// Step 3: modify foo's subtree while s2 covers it (repl change + delete).
hdfs.setReplication(bar2,REPL_1);
hdfs.delete(bar,true);
hdfs.createSnapshot(sdir1,"s3");
// Step 5: move foo from sdir2 to sdir1.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
// bar was deleted before the rename but must still be visible through s2.
final Path snapshotBar=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(snapshotBar));
// Deleting bar2 at its new location must not affect the s2 copy, which
// keeps the original replication (the REPL_1 change came after s2).
final Path newBar2=new Path(newfoo,"bar2");
assertTrue(hdfs.exists(newBar2));
hdfs.delete(newBar2,true);
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
FileStatus status=hdfs.getFileStatus(bar2_s2);
assertEquals(REPL,status.getReplication());
// s3 was taken before foo arrived in sdir1, so it has no foo/bar2.
final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
}
InternalCallVerifier BooleanVerifier
/**
 * Rename the same file twice with a snapshot taken between the renames
 * (file1 -> file2 under snap2, file2 -> file3 afterwards), then verify the
 * snapshot diff reports: snap1..snap2 shows file1->file2, snap2..current
 * shows file2->file3, and snap1..current collapses both moves into a single
 * file1->file3 RENAME entry.
 */
@Test(timeout=60000) public void testRenameTwiceInSnapshot() throws Exception {
hdfs.mkdirs(sub1);
hdfs.allowSnapshot(sub1);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sub1,snap1);
hdfs.rename(file1,file2);
hdfs.createSnapshot(sub1,snap2);
hdfs.rename(file2,file3);
SnapshotDiffReport diffReport;
// snap1 -> snap2: only the first rename is visible.
diffReport=hdfs.getSnapshotDiffReport(sub1,snap1,snap2);
LOG.info("DiffList is " + diffReport.toString());
List entries=diffReport.getDiffList();
// assertEquals reports the actual size on failure, unlike assertTrue(==).
assertEquals(2,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,file1.getName(),file2.getName()));
// snap2 -> current state (""): only the second rename is visible.
diffReport=hdfs.getSnapshotDiffReport(sub1,snap2,"");
LOG.info("DiffList is " + diffReport.toString());
entries=diffReport.getDiffList();
assertEquals(2,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,file2.getName(),file3.getName()));
// snap1 -> current state: the two renames collapse into file1 -> file3.
diffReport=hdfs.getSnapshotDiffReport(sub1,snap1,"");
LOG.info("DiffList is " + diffReport.toString());
entries=diffReport.getDiffList();
assertEquals(2,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,file1.getName(),file3.getName()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After rename, delete the snapshot in src
 * <p>
 * foo is renamed from sdir2 (covered by s2/s3) into sdir1, modified, captured
 * by sdir1's s4, then deleted. Snapshots are then removed one by one while
 * verifying, at each step, which snapshot copies remain and that the quota
 * accounting of the root shrinks accordingly.
 */
@Test public void testRenameDirAndDeleteSnapshot_2() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s3");
// Move foo into sdir1, add bar2 there, snapshot with s4, then delete foo.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
restartClusterAndCheckImage(true);
final Path bar2=new Path(newfoo,"bar2");
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newfoo,true);
// s4 keeps both bar and bar2 alive until it is deleted.
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
assertTrue(hdfs.exists(bar_s4));
hdfs.deleteSnapshot(sdir1,"s4");
restartClusterAndCheckImage(true);
// foo never lived in sdir1 at s3 time, so sdir1's s3 has no copies; sdir2's
// s3 still shows bar but not bar2 (created after the rename).
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertTrue(hdfs.exists(bar_s3));
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
// Drop s3; s2 still holds a WithName reference to foo with one diff (s2's).
hdfs.deleteSnapshot(sdir2,"s3");
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
INodeReference fooRef=fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC=(WithCount)fooRef.getReferredINode();
assertEquals(1,fooWC.getReferenceCount());
INodeDirectory fooDir=fooWC.getReferredINode().asDirectory();
List diffs=fooDir.getDiffs().asList();
assertEquals(1,diffs.size());
assertEquals(s2.getId(),diffs.get(0).getSnapshotId());
restartClusterAndCheckImage(true);
// Drop s2: the last reference to foo's subtree disappears.
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
restartClusterAndCheckImage(true);
// Root quota: 4 namespace entries remain (root, dir1, dir2 + s1 accounting).
Quota.Counts q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(4,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
// Dropping the final snapshot s1 releases one more namespace entry.
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3,q.get(Quota.NAMESPACE));
assertEquals(0,q.get(Quota.DISKSPACE));
}
InternalCallVerifier BooleanVerifier
/**
 * Test rename from a non-snapshottable dir to a snapshottable dir.
 * <p>
 * Moves a directory tree from a parent with no snapshot capability into one
 * that already has a snapshot, then asserts that the moved inode resolves to
 * an ordinary {@code INodeDirectory} (rather than a reference node).
 */
@Test(timeout=60000) public void testRenameFromNonSDir2SDir() throws Exception {
// Source root (never snapshottable) and destination root (snapshottable).
final Path plainRoot=new Path("/dir1");
final Path snapRoot=new Path("/dir2");
hdfs.mkdirs(plainRoot);
hdfs.mkdirs(snapRoot);
// Build /dir1/foo/bar with bar as a real file.
final Path srcFoo=new Path(plainRoot,"foo");
final Path srcBar=new Path(srcFoo,"bar");
DFSTestUtil.createFile(hdfs,srcBar,BLOCKSIZE,REPL,SEED);
// Snapshot the destination before the move so the rename lands inside a
// snapshotted tree.
SnapshotTestHelper.createSnapshot(hdfs,snapRoot,snap1);
final Path movedFoo=new Path(snapRoot,"foo");
hdfs.rename(srcFoo,movedFoo);
// The moved inode must be a plain directory, not an INodeReference.
final INode movedNode=fsdir.getINode4Write(movedFoo.toString());
assertTrue(movedNode instanceof INodeDirectory);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renames files/dirs captured in snapshot s0 — including a rename with
 * {@link Rename#OVERWRITE} — and verifies the s0-&gt;s1 diff report shows the
 * expected MODIFY/DELETE/RENAME entries. The overwritten target must be
 * reported as a DELETE, not merged into a RENAME entry.
 */
@Test public void testRenameWithOverWrite() throws Exception {
final Path root=new Path("/");
final Path foo=new Path(root,"foo");
final Path file1InFoo=new Path(foo,"file1");
final Path file2InFoo=new Path(foo,"file2");
final Path file3InFoo=new Path(foo,"file3");
DFSTestUtil.createFile(hdfs,file1InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file2InFoo,1L,REPL,SEED);
DFSTestUtil.createFile(hdfs,file3InFoo,1L,REPL,SEED);
final Path bar=new Path(root,"bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs,root,"s0");
// Move file1 into bar, rename bar itself, move file2 after the dir rename,
// then clobber newDir/file1 with file3 via an OVERWRITE rename.
final Path fileInBar=new Path(bar,"file1");
hdfs.rename(file1InFoo,fileInBar);
final Path newDir=new Path(root,"newDir");
hdfs.rename(bar,newDir);
final Path file2InNewDir=new Path(newDir,"file2");
hdfs.rename(file2InFoo,file2InNewDir);
final Path file1InNewDir=new Path(newDir,"file1");
hdfs.rename(file3InFoo,file1InNewDir,Rename.OVERWRITE);
SnapshotTestHelper.createSnapshot(hdfs,root,"s1");
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(root,"s0","s1");
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List entries=report.getDiffList();
assertEquals(7,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,foo.getName(),null));
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,bar.getName(),null));
// foo/file1 (moved to bar, then overwritten by file3) surfaces as a DELETE.
assertTrue(existsInDiffReport(entries,DiffType.DELETE,"foo/file1",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"bar","newDir"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file2","newDir/file2"));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file3","newDir/file1"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test renaming a file and then delete snapshots.
 * The renamed file's replication is changed between snapshots; each snapshot
 * copy must keep the replication in effect when it was taken, and deleting
 * snapshots must not corrupt the remaining copies or the persisted image.
 */
@Test public void testRenameFileAndDeleteSnapshot() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Rename across snapshottable dirs, then mutate replication around s4/s5.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
hdfs.setReplication(newfoo,REPL_1);
hdfs.createSnapshot(sdir1,"s4");
hdfs.setReplication(newfoo,REPL_2);
FileStatus status=hdfs.getFileStatus(newfoo);
assertEquals(REPL_2,status.getReplication());
// s4 captured REPL_1; the later change to REPL_2 must not leak into it.
final Path foo_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo");
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.createSnapshot(sdir1,"s5");
final Path foo_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo");
status=hdfs.getFileStatus(foo_s5);
assertEquals(REPL_2,status.getReplication());
// Delete snapshots newest-first and re-check the surviving copies.
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status=hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1,status.getReplication());
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(foo_s4));
// s3 was taken before the rename, so "foo" exists in neither s3 view.
Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo");
assertFalse(hdfs.exists(foo_s3));
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo");
assertTrue(hdfs.exists(foo_s2));
status=hdfs.getFileStatus(foo_s2);
assertEquals(REPL,status.getReplication());
// Only the s2 diff should remain on the file's diff list.
INodeFile snode=fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1,snode.getDiffs().asList().size());
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),snode.getDiffs().getLastSnapshotId());
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(foo_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test renaming a dir and then delete snapshots.
 * Files under the renamed dir are created/deleted around snapshots s4/s5;
 * each snapshot view must reflect only the state at snapshot time, both in
 * the source dir's snapshots (s2) and the destination dir's (s3-s5).
 */
@Test public void testRenameDirAndDeleteSnapshot_1() throws Exception {
final Path sdir1=new Path("/dir1");
final Path sdir2=new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo=new Path(sdir2,"foo");
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo,"bar2");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED);
DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED);
SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1");
SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2");
hdfs.createSnapshot(sdir1,"s3");
// Rename foo from sdir2 into sdir1, then add bar3 before taking s4.
final Path newfoo=new Path(sdir1,"foo");
hdfs.rename(foo,newfoo);
final Path newbar=new Path(newfoo,bar.getName());
final Path newbar2=new Path(newfoo,bar2.getName());
final Path newbar3=new Path(newfoo,"bar3");
DFSTestUtil.createFile(hdfs,newbar3,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sdir1,"s4");
hdfs.delete(newbar,true);
hdfs.delete(newbar3,true);
assertFalse(hdfs.exists(newbar3));
assertFalse(hdfs.exists(bar));
// Files deleted after s4 must remain visible through the s4 view.
final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar");
final Path bar3_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar3");
assertTrue(hdfs.exists(bar_s4));
assertTrue(hdfs.exists(bar3_s4));
hdfs.createSnapshot(sdir1,"s5");
hdfs.delete(newbar2,true);
assertFalse(hdfs.exists(bar2));
final Path bar2_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo/bar2");
assertTrue(hdfs.exists(bar2_s5));
// Deleting s5 combines its diff into s4; bar2 stays reachable via s4.
hdfs.deleteSnapshot(sdir1,"s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(bar2_s5));
final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
hdfs.deleteSnapshot(sdir1,"s4");
assertFalse(hdfs.exists(bar_s4));
// s3 (both dirs) predates the rename, so foo/* is invisible there; only
// the source dir's s2 still holds the original bar/bar2 (not bar3).
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar");
assertFalse(hdfs.exists(bar_s3));
final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar");
assertTrue(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s4));
Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar3_s4));
Path bar3_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
bar3_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
final Path bar3_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar3");
assertFalse(hdfs.exists(bar3_s2));
// Tear down the remaining snapshots, checking the image after each step.
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir2,"s2");
assertFalse(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s2));
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1,"s1");
restartClusterAndCheckImage(true);
}
InternalCallVerifier BooleanVerifier
/**
 * Rename a file under a snapshottable directory, file exists
 * in a snapshot. The diff report against the current state must contain a
 * MODIFY entry for the dir itself plus one RENAME entry for the file.
 */
@Test public void testRenameFileInSnapshot() throws Exception {
hdfs.mkdirs(sub1);
hdfs.allowSnapshot(sub1);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPL,SEED);
hdfs.createSnapshot(sub1,snap1);
hdfs.rename(file1,file2);
// "" as the second snapshot name means "diff against the current state".
SnapshotDiffReport diffReport=hdfs.getSnapshotDiffReport(sub1,snap1,"");
LOG.info("DiffList is " + diffReport.toString());
List entries=diffReport.getDiffList();
// assertEquals gives the actual size on failure (the old
// assertTrue(entries.size() == 2) only reported a bare AssertionError).
assertEquals(2,entries.size());
assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
assertTrue(existsInDiffReport(entries,DiffType.RENAME,file1.getName(),file2.getName()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Renaming a directory under a snapshottable dir must produce exactly two
 * diff entries against the snapshot: a MODIFY of the snapshot root and a
 * RENAME of the moved directory.
 */
@Test(timeout=60000) public void testRenameDirectoryInSnapshot() throws Exception {
  final Path srcDir=new Path(sub1,"sub2");
  final Path dstDir=new Path(sub1,"sub3");
  final Path fileInSrc=new Path(srcDir,"sub2file1");
  final String snapName="sub1snap1";
  hdfs.mkdirs(sub1);
  hdfs.mkdirs(srcDir);
  DFSTestUtil.createFile(hdfs,fileInSrc,BLOCKSIZE,REPL,SEED);
  SnapshotTestHelper.createSnapshot(hdfs,sub1,snapName);
  hdfs.rename(srcDir,dstDir);
  // Diff the snapshot against the current state ("" = latest).
  SnapshotDiffReport report=hdfs.getSnapshotDiffReport(sub1,snapName,"");
  LOG.info("DiffList is \n\"" + report.toString() + "\"");
  List entries=report.getDiffList();
  assertEquals(2,entries.size());
  assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null));
  assertTrue(existsInDiffReport(entries,DiffType.RENAME,srcDir.getName(),dstDir.getName()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 * setQuota must never create a directory diff by itself: before any
 * snapshot exists the diff list stays empty, and after a snapshot is taken
 * the diff belongs to the snapshot, not to the quota operation.
 */
@Test public void testClearQuota() throws Exception {
final Path dir=new Path("/TestSnapshot");
hdfs.mkdirs(dir);
hdfs.allowSnapshot(dir);
// QUOTA_DONT_SET / QUOTA_RESET variants: none should add a diff while the
// dir has no snapshots.
hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET,HdfsConstants.QUOTA_DONT_SET);
INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET - 1,HdfsConstants.QUOTA_DONT_SET - 1);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0,dirNode.getDiffs().asList().size());
// After s1 exists, exactly one diff (from the snapshot) is expected.
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(1,dirNode.getDiffs().asList().size());
SnapshottableDirectoryStatus[] status=hdfs.getSnapshottableDirListing();
assertEquals(1,status.length);
assertEquals(dir,status[0].getFullPath());
// A file created after s2 must land in s2's CREATED child-diff list of the
// sub-directory, keyed by s2's snapshot id.
final Path subDir=new Path(dir,"sub");
hdfs.mkdirs(subDir);
hdfs.createSnapshot(dir,"s2");
final Path file=new Path(subDir,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET);
INode subNode=fsdir.getINode4Write(subDir.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
List diffList=subNode.asDirectory().getDiffs().asList();
assertEquals(1,diffList.size());
Snapshot s2=dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),diffList.get(0).getSnapshotId());
List createdList=diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,createdList.size());
assertSame(fsdir.getINode4Write(file.toString()),createdList.get(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Setting a quota on a sub-directory of a snapshottable dir must mark it as
 * quota-set but must NOT convert it into a with-snapshot directory.
 */
@Test(timeout=60000) public void testSetQuota() throws Exception {
  final Path dir=new Path("/TestSnapshot");
  hdfs.mkdirs(dir);
  SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
  final Path child=new Path(dir,"sub");
  hdfs.mkdirs(child);
  DFSTestUtil.createFile(hdfs,new Path(child,"file"),BLOCKSIZE,REPLICATION,seed);
  INodeDirectory childNode=INodeDirectory.valueOf(fsdir.getINode(child.toString()),child);
  assertFalse(childNode.isWithSnapshot());
  // Apply a (near-max) quota and re-resolve the inode.
  hdfs.setQuota(child,Long.MAX_VALUE - 1,Long.MAX_VALUE - 1);
  childNode=INodeDirectory.valueOf(fsdir.getINode(child.toString()),child);
  assertTrue(childNode.isQuotaSet());
  assertFalse(childNode.isWithSnapshot());
}
InternalCallVerifier NullVerifier
/**
 * Test if the OfflineImageViewerPB can correctly parse a fsimage containing
 * snapshots. Success criterion is simply that visit() completes without
 * throwing on a real image produced by runTestSnapshot.
 */
@Test public void testOfflineImageViewer() throws Exception {
runTestSnapshot(1);
File originalFsimage=FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
assertNotNull("Didn't generate or can't find fsimage",originalFsimage);
StringWriter output=new StringWriter();
// try-with-resources: the original leaked both the PrintWriter and the
// RandomAccessFile handle on the fsimage.
try (PrintWriter o=new PrintWriter(output);
    RandomAccessFile r=new RandomAccessFile(originalFsimage,"r")) {
  PBImageXmlWriter v=new PBImageXmlWriter(new Configuration(),o);
  v.visit(r);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A simple test that updates a sub-directory of a snapshottable directory
 * with snapshots: setTimes on the live dir must not change the
 * modification/access times recorded in the snapshot copy.
 */
@Test(timeout=60000) public void testUpdateDirectory() throws Exception {
  final Path dir=new Path("/dir");
  final Path sub=new Path(dir,"sub");
  DFSTestUtil.createFile(hdfs,new Path(sub,"file"),BLOCKSIZE,REPLICATION,seed);
  final FileStatus before=hdfs.getFileStatus(sub);
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir,"s1");
  // Mutate times AFTER the snapshot was taken.
  hdfs.setTimes(sub,100L,100L);
  final Path snapCopy=SnapshotTestHelper.getSnapshotPath(dir,"s1","sub");
  final FileStatus inSnapshot=hdfs.getFileStatus(snapCopy);
  assertEquals(before.getModificationTime(),inSnapshot.getModificationTime());
  assertEquals(before.getAccessTime(),inSnapshot.getAccessTime());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
 * they are idempotent. Also covers the root directory, which is always
 * snapshottable: allow/disallow on root only toggles its snapshot quota
 * between 0 and SNAPSHOT_LIMIT rather than its snapshottable flag.
 */
@Test public void testAllowAndDisallowSnapshot() throws Exception {
final Path dir=new Path("/dir");
final Path file0=new Path(dir,"file0");
final Path file1=new Path(dir,"file1");
DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
// allowSnapshot twice: second call must be a no-op, not an error.
hdfs.allowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
hdfs.allowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
// disallowSnapshot twice: likewise idempotent.
hdfs.disallowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
hdfs.disallowSnapshot(dir);
dirNode=fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
// Root stays snapshottable throughout; only its quota changes.
final Path root=new Path("/");
INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.allowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.allowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.disallowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
hdfs.disallowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
InternalCallVerifier BooleanVerifier
/**
 * A file renamed between two snapshottable dirs and then deleted must stay
 * readable through BOTH snapshot paths (source dir's s1 and destination
 * dir's s2), even after a checkpoint and a NameNode restart.
 */
@Test(timeout=30000) public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
final Path foo=new Path("/foo");
final Path foo2=new Path("/foo2");
hdfs.mkdirs(foo);
hdfs.mkdirs(foo2);
hdfs.allowSnapshot(foo);
hdfs.allowSnapshot(foo2);
final Path bar=new Path(foo,"bar");
final Path bar2=new Path(foo2,"bar");
DFSTestUtil.createFile(hdfs,bar,100,(short)2,100024L);
hdfs.createSnapshot(foo,"s1");
// Rename after s1, snapshot the destination, then delete the live file.
assertTrue(hdfs.rename(bar,bar2));
hdfs.createSnapshot(foo2,"s2");
assertTrue(hdfs.delete(bar2,true));
// Persist the state via saveNamespace and restart to read from the image.
NameNode nameNode=cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode,false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
cluster.restartNameNode(true);
String barSnapshotPath=Snapshot.getSnapshotPath(foo.toString(),"s1/bar");
DFSTestUtil.readFile(hdfs,new Path(barSnapshotPath));
String bar2SnapshotPath=Snapshot.getSnapshotPath(foo2.toString(),"s2/bar");
DFSTestUtil.readFile(hdfs,new Path(bar2SnapshotPath));
}
InternalCallVerifier EqualityVerifier
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works. Success criterion: saveNamespace
 * after the delete completes without error.
 */
@Test public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
final Path foo=new Path("/foo");
final Path bar=new Path(foo,"bar");
// NOTE(review): getBytes() uses the platform default charset; content is
// ASCII so the length is charset-independent here.
final byte[] testData="foo bar baz".getBytes();
// Start with a 0-length file (no blocks) captured by snapshot s0.
DFSTestUtil.createFile(hdfs,bar,0,REPLICATION,0L);
assertEquals(0,fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s0");
// Append real data after the snapshot, giving the live file one block.
FSDataOutputStream out=hdfs.append(bar);
out.write(testData);
out.close();
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(testData.length,blks[0].getNumBytes());
// Delete the live file; the s0 copy is the zero-length version. The
// checkpoint below must succeed despite the mixed block state.
hdfs.delete(bar,true);
cluster.getNameNode().getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
cluster.getNameNode().getRpcServer().saveNamespace();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot.
 * A second, empty block is allocated via addBlock after snapshot s1; deleting
 * the live file must drop that 0-sized block while the s1 copy keeps only
 * the original full block.
 */
@Test public void testDeletionWithZeroSizeBlock() throws Exception {
final Path foo=new Path("/foo");
final Path bar=new Path(foo,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s0");
// Open for append so the file is under construction.
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
// Allocate a new (still empty) block directly through the NN RPC.
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
barNode=fsdir.getINode4Write(bar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// Delete the live file; the snapshot copy must not retain the empty block.
hdfs.delete(bar,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1",bar.getName());
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
InternalCallVerifier BooleanVerifier
/**
 * A file deleted after a snapshot must remain fully readable through the
 * snapshot path, even across a checkpoint (saveNamespace) and a NameNode
 * restart that reloads the fsimage.
 */
@Test(timeout=30000) public void testReadSnapshotFileWithCheckpoint() throws Exception {
  final Path foo=new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  final Path bar=new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs,bar,100,(short)2,100024L);
  hdfs.createSnapshot(foo,"s1");
  assertTrue(hdfs.delete(bar,true));
  // Checkpoint so the snapshot state is persisted into the fsimage.
  NameNode nn=cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nn,false);
  NameNodeAdapter.saveNamespace(nn);
  NameNodeAdapter.leaveSafeMode(nn);
  cluster.restartNameNode(true);
  // The snapshot copy of the deleted file must still be readable.
  final String snapPath=Snapshot.getSnapshotPath(foo.toString(),"s1/bar");
  DFSTestUtil.readFile(hdfs,new Path(snapPath));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test public void testDeletionWithZeroSizeBlock3() throws Exception {
final Path foo=new Path("/foo");
final Path subDir=new Path(foo,"sub");
final Path bar=new Path(subDir,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
// Re-open for append so the file is under construction.
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
// Allocate a second, still-empty block via the NN RPC (stays 0-sized).
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
// Rename the UC file after the snapshot, then delete its whole subtree.
final Path bar2=new Path(subDir,"bar2");
hdfs.rename(bar,bar2);
INodeFile bar2Node=fsdir.getINode4Write(bar2.toString()).asFile();
blks=bar2Node.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
hdfs.delete(subDir,true);
// The snapshot copy must keep only the original full block; the 0-sized
// block allocated after s1 must have been destroyed.
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1","sub/bar");
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Make sure we delete 0-sized block when deleting an under-construction file
 * whose parent directory is removed. The snapshot copy must retain only the
 * full block that existed at snapshot time.
 */
@Test public void testDeletionWithZeroSizeBlock2() throws Exception {
final Path foo=new Path("/foo");
final Path subDir=new Path(foo,"sub");
final Path bar=new Path(subDir,"bar");
DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L);
// Re-open for append so the file is under construction.
hdfs.append(bar);
INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks=barNode.getBlocks();
assertEquals(1,blks.length);
// Allocate a second, still-empty block via the NN RPC.
ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]);
cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null);
SnapshotTestHelper.createSnapshot(hdfs,foo,"s1");
barNode=fsdir.getINode4Write(bar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(2,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
assertEquals(0,blks[1].getNumBytes());
// Delete the parent dir; snapshot copy must drop the 0-sized block.
hdfs.delete(subDir,true);
final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1","sub/bar");
barNode=fsdir.getINode(sbar.toString()).asFile();
blks=barNode.getBlocks();
assertEquals(1,blks.length);
assertEquals(BLOCKSIZE,blks[0].getNumBytes());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier HybridVerifier
/**
 * Test deleting a file with snapshots. Need to check the blocksMap to make
 * sure the corresponding record is updated correctly: blocks of a file with
 * no snapshot copies are removed on delete, while blocks still referenced
 * by a snapshot copy must stay in the blocksMap.
 */
@Test(timeout=60000) public void testDeletionWithSnapshots() throws Exception {
Path file0=new Path(sub1,"file0");
Path file1=new Path(sub1,"file1");
Path sub2=new Path(sub1,"sub2");
Path file2=new Path(sub2,"file2");
Path file3=new Path(sub1,"file3");
Path file4=new Path(sub1,"file4");
Path file5=new Path(sub1,"file5");
DFSTestUtil.createFile(hdfs,file0,4 * BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,2 * BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file2,3 * BLOCKSIZE,REPLICATION,seed);
// file2 is deleted before any snapshot exists: its blocks must be gone
// from the blocksMap immediately.
{
final INodeFile f2=assertBlockCollection(file2.toString(),3,fsdir,blockmanager);
BlockInfo[] blocks=f2.getBlocks();
hdfs.delete(sub2,true);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
}
final String[] snapshots={"s0","s1","s2"};
DFSTestUtil.createFile(hdfs,file3,5 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[0]);
DFSTestUtil.createFile(hdfs,file4,1 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[1]);
DFSTestUtil.createFile(hdfs,file5,7 * BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,sub1,snapshots[2]);
// setReplication on a snapshotted file converts it to a with-snapshot
// INodeFile but must not make it under-construction.
{
INodeFile f1=assertBlockCollection(file1.toString(),2,fsdir,blockmanager);
Assert.assertSame(INodeFile.class,f1.getClass());
hdfs.setReplication(file1,(short)2);
f1=assertBlockCollection(file1.toString(),2,fsdir,blockmanager);
assertTrue(f1.isWithSnapshot());
assertFalse(f1.isUnderConstruction());
}
// Deleting the live file0 must keep its blocks alive because the s0/s1
// snapshot copies still reference them.
final INodeFile f0=assertBlockCollection(file0.toString(),4,fsdir,blockmanager);
BlockInfo[] blocks0=f0.getBlocks();
Path snapshotFile0=SnapshotTestHelper.getSnapshotPath(sub1,"s0",file0.getName());
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
hdfs.delete(file0,true);
for ( BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
String s1f0=SnapshotTestHelper.getSnapshotPath(sub1,"s1",file0.getName()).toString();
assertBlockCollection(s1f0,4,fsdir,blockmanager);
// Deleting snapshot s1 still leaves s0 referencing the blocks.
hdfs.deleteSnapshot(sub1,"s1");
for ( BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(),4,fsdir,blockmanager);
// Resolving a path inside the deleted snapshot must fail cleanly.
try {
INodeFile.valueOf(fsdir.getINode(s1f0),s1f0);
fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
}
catch ( IOException e) {
assertExceptionContains("File does not exist: " + s1f0,e);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * When combine two snapshots, make sure files/directories created after the
 * prior snapshot get destroyed. Quota usage is checked after each step to
 * confirm the namespace/diskspace accounting matches the combined diffs.
 */
@Test(timeout=300000) public void testCombineSnapshotDiff3() throws Exception {
Path dir=new Path("/dir");
Path subDir1=new Path(dir,"subdir1");
Path subDir2=new Path(dir,"subdir2");
hdfs.mkdirs(subDir2);
Path subsubDir=new Path(subDir1,"subsubdir");
hdfs.mkdirs(subsubDir);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
// Content created between s1 and s2 — exists only in s2's view.
Path newDir=new Path(subsubDir,"newdir");
Path newFile=new Path(newDir,"newfile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
Path newFile2=new Path(subDir2,"newfile");
DFSTestUtil.createFile(hdfs,newFile2,BLOCKSIZE,REPLICATION,seed);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s2");
checkQuotaUsageComputation(dir,11,BLOCKSIZE * 2 * REPLICATION);
hdfs.delete(subsubDir,true);
hdfs.delete(subDir2,true);
checkQuotaUsageComputation(dir,14,BLOCKSIZE * 2 * REPLICATION);
// Deleting s2 combines it into s1; the post-s1 creations must be
// destroyed entirely (diskspace drops back to 0).
hdfs.deleteSnapshot(dir,"s2");
checkQuotaUsageComputation(dir,8,0);
Path subdir1_s1=SnapshotTestHelper.getSnapshotPath(dir,"s1",subDir1.getName());
Path subdir1_s2=SnapshotTestHelper.getSnapshotPath(dir,"s2",subDir1.getName());
assertTrue(hdfs.exists(subdir1_s1));
assertFalse(hdfs.exists(subdir1_s2));
}
InternalCallVerifier EqualityVerifier
/**
 * Test deleting snapshots with modification on the metadata of directory.
 * Owner/group changes are made between snapshots; after deleting the newest
 * snapshot, each remaining snapshot must still report the owner/group that
 * were current when it was taken.
 */
@Test(timeout=300000) public void testDeleteSnapshotWithDirModification() throws Exception {
Path file=new Path(sub,"file");
DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed);
hdfs.setOwner(sub,"user1","group1");
SnapshotTestHelper.createSnapshot(hdfs,sub,"s1");
checkQuotaUsageComputation(sub,3,BLOCKSIZE * 3);
hdfs.setOwner(sub,"user2","group2");
checkQuotaUsageComputation(sub,3,BLOCKSIZE * 3);
hdfs.createSnapshot(sub,"s2");
checkQuotaUsageComputation(sub,4,BLOCKSIZE * 3);
hdfs.createSnapshot(sub,"s3");
checkQuotaUsageComputation(sub,5,BLOCKSIZE * 3);
hdfs.setOwner(sub,"user3","group3");
checkQuotaUsageComputation(sub,5,BLOCKSIZE * 3);
// Delete s3 (taken with user2/group2 still in effect); s2 must keep
// reporting user2/group2.
hdfs.deleteSnapshot(sub,"s3");
checkQuotaUsageComputation(sub,4,BLOCKSIZE * 3);
FileStatus statusOfS2=hdfs.getFileStatus(new Path(sub,HdfsConstants.DOT_SNAPSHOT_DIR + "/s2"));
assertEquals("user2",statusOfS2.getOwner());
assertEquals("group2",statusOfS2.getGroup());
// Likewise, after deleting s2, s1 must keep user1/group1.
hdfs.deleteSnapshot(sub,"s2");
checkQuotaUsageComputation(sub,3,BLOCKSIZE * 3);
FileStatus statusOfS1=hdfs.getFileStatus(new Path(sub,HdfsConstants.DOT_SNAPSHOT_DIR + "/s1"));
assertEquals("user1",statusOfS1.getOwner());
assertEquals("group1",statusOfS1.getGroup());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this simplest scenario, the
 * snapshots are taken on the same directory, and we do not need to combine
 * snapshot diffs. Also covers the error paths: deleting a snapshot from a
 * non-snapshottable dir and deleting a non-existent snapshot.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot1() throws Exception {
Path file0=new Path(sub,"file0");
Path file1=new Path(sub,"file1");
DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
String snapshotName="s1";
// Error path 1: dir is not snapshottable yet.
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: " + sub.toString() + " is not snapshottable yet");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + sub,e);
}
hdfs.allowSnapshot(sub);
// Error path 2: snapshot name does not exist.
try {
hdfs.deleteSnapshot(sub,snapshotName);
fail("SnapshotException expected: snapshot " + snapshotName + " does not exist for "+ sub.toString());
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Cannot delete snapshot " + snapshotName + " from path "+ sub.toString()+ ": the snapshot does not exist.",e);
}
// Create/delete/re-create the earliest snapshot, checking quota each time.
SnapshotTestHelper.createSnapshot(hdfs,sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,3,BLOCKSIZE * REPLICATION * 2);
hdfs.createSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,4,BLOCKSIZE * REPLICATION * 2);
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
String snapshotName2="s2";
hdfs.createSnapshot(sub,snapshotName2);
checkQuotaUsageComputation(sub,6,BLOCKSIZE * REPLICATION * 3);
// Deleting the earliest snapshot must not disturb the later snapshot's
// view of newFile: its FileStatus is identical before and after.
Path ss=SnapshotTestHelper.getSnapshotPath(sub,snapshotName2,"newFile");
FileStatus statusBeforeDeletion=hdfs.getFileStatus(ss);
hdfs.deleteSnapshot(sub,snapshotName);
checkQuotaUsageComputation(sub,5,BLOCKSIZE * REPLICATION * 3);
FileStatus statusAfterDeletion=hdfs.getFileStatus(ss);
System.out.println("Before deletion: " + statusBeforeDeletion.toString() + "\n"+ "After deletion: "+ statusAfterDeletion.toString());
assertEquals(statusBeforeDeletion.toString(),statusAfterDeletion.toString());
}
UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test deleting the earliest (first) snapshot. In this more complicated
 * scenario, the snapshots are taken across directories.
 *
 * The test covers the following scenarios:
 * 1. delete the first diff in the diff list of a directory
 * 2. delete the first diff in the diff list of a file
 *
 * Also, the recursive cleanTree process should cover both INodeFile and
 * INodeDirectory.
 */
@Test(timeout=300000) public void testDeleteEarliestSnapshot2() throws Exception {
Path noChangeDir=new Path(sub,"noChangeDir");
Path noChangeFile=new Path(noChangeDir,"noChangeFile");
Path metaChangeFile=new Path(noChangeDir,"metaChangeFile");
Path metaChangeDir=new Path(noChangeDir,"metaChangeDir");
Path toDeleteFile=new Path(metaChangeDir,"toDeleteFile");
DFSTestUtil.createFile(hdfs,noChangeFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,metaChangeFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,toDeleteFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile toDeleteFileNode=TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(),1,fsdir,blockmanager);
BlockInfo[] blocks=toDeleteFileNode.getBlocks();
// s0 captures everything; then delete one file and change metadata on two
// other inodes before taking s1.
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
checkQuotaUsageComputation(dir,8,3 * BLOCKSIZE * REPLICATION);
hdfs.delete(toDeleteFile,true);
checkQuotaUsageComputation(dir,10,3 * BLOCKSIZE * REPLICATION);
hdfs.setReplication(metaChangeFile,REPLICATION_1);
hdfs.setOwner(metaChangeDir,"unknown","unknown");
checkQuotaUsageComputation(dir,11,3 * BLOCKSIZE * REPLICATION);
hdfs.createSnapshot(dir,"s1");
checkQuotaUsageComputation(dir,12,3 * BLOCKSIZE * REPLICATION);
// Deleting s0 (the earliest) must destroy toDeleteFile's blocks, since
// only s0 referenced them.
hdfs.deleteSnapshot(dir,"s0");
checkQuotaUsageComputation(dir,7,2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
final INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory();
Snapshot snapshot0=dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1=dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
// Only s1's diff remains on dir; metaChangeDir's diff list is now empty.
DirectoryDiffList diffList=dirNode.getDiffs();
assertEquals(1,diffList.asList().size());
assertEquals(snapshot1.getId(),diffList.getLast().getSnapshotId());
diffList=fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
assertEquals(0,diffList.asList().size());
// Unchanged inodes must remain plain INodeDirectory/INodeFile instances.
final INodeDirectory noChangeDirNode=(INodeDirectory)fsdir.getINode(noChangeDir.toString());
assertEquals(INodeDirectory.class,noChangeDirNode.getClass());
final INodeFile noChangeFileNode=(INodeFile)fsdir.getINode(noChangeFile.toString());
assertEquals(INodeFile.class,noChangeFileNode.getClass());
TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(),1,fsdir,blockmanager);
// The metadata changes made before s1 are still the live state.
FileStatus status=hdfs.getFileStatus(metaChangeDir);
assertEquals("unknown",status.getOwner());
assertEquals("unknown",status.getGroup());
status=hdfs.getFileStatus(metaChangeFile);
assertEquals(REPLICATION_1,status.getReplication());
TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(),1,fsdir,blockmanager);
// The deleted file is gone both from the live tree and the deleted s0.
try {
status=hdfs.getFileStatus(toDeleteFile);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(),e);
}
final Path toDeleteFileInSnapshot=SnapshotTestHelper.getSnapshotPath(dir,"s0",toDeleteFile.toString().substring(dir.toString().length()));
try {
status=hdfs.getFileStatus(toDeleteFileInSnapshot);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(),e);
}
}
InternalCallVerifier BooleanVerifier
@Test public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
// Remember the real streams so they can be restored afterwards; replacing
// System.out/err without restoring them pollutes every later test (the
// sibling cat-command test in this file does restore them).
PrintStream oldOut=System.out;
PrintStream oldErr=System.err;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream psOut=new PrintStream(out);
System.setOut(psOut);
System.setErr(psOut);
try {
FsShell shell=new FsShell();
shell.setConf(conf);
// Too few arguments: -deleteSnapshot requires <snapshotDir> <snapshotName>.
String[] argv1={"-deleteSnapshot","/tmp"};
int val=shell.run(argv1);
assertEquals(-1,val);
assertTrue(out.toString().contains(argv1[0] + ": Incorrect number of arguments."));
out.reset();
// Too many arguments.
String[] argv2={"-deleteSnapshot","/tmp","s1","s2"};
val=shell.run(argv2);
assertEquals(-1,val);
assertTrue(out.toString().contains(argv2[0] + ": Incorrect number of arguments."));
}
 finally {
// Restore the original streams even if an assertion above failed.
System.setOut(oldOut);
System.setErr(oldErr);
psOut.close();
out.close();
}
}
InternalCallVerifier EqualityVerifier
/**
* Test applying editlog of operation which deletes a snapshottable directory
* without snapshots. The snapshottable dir list in snapshot manager should be
* updated.
*/
@Test(timeout=300000) public void testApplyEditLogForDeletion() throws Exception {
// Create two snapshottable directories under a common parent /foo.
final Path foo=new Path("/foo");
final Path bar1=new Path(foo,"bar1");
final Path bar2=new Path(foo,"bar2");
hdfs.mkdirs(bar1);
hdfs.mkdirs(bar2);
hdfs.allowSnapshot(bar1);
hdfs.allowSnapshot(bar2);
// Both dirs must be tracked by the snapshot manager.
assertEquals(2,cluster.getNamesystem().getSnapshotManager().getNumSnapshottableDirs());
assertEquals(2,cluster.getNamesystem().getSnapshotManager().getSnapshottableDirs().length);
// Delete the parent (neither child holds a snapshot, so this succeeds),
// then restart the NameNode so the deletion is re-applied from the edit log.
hdfs.delete(foo,true);
cluster.restartNameNode(0);
// After edit-log replay the snapshottable-dir list must be empty.
assertEquals(0,cluster.getNamesystem().getSnapshotManager().getNumSnapshottableDirs());
assertEquals(0,cluster.getNamesystem().getSnapshotManager().getSnapshottableDirs().length);
// Also verify the state survives a checkpoint (saveNamespace) + restart.
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Test deleting a directory which is a descendant of a snapshottable
* directory. In the test we need to cover the following cases:
*
* 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
* 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
* on ancestor(s).
* 3. Delete current INodeFileWithSnapshot.
* 4. Delete current INodeDirectoryWithSnapshot.
*
*/
@Test(timeout=300000) public void testDeleteCurrentFileDirectory() throws Exception {
// Setup: files/dirs that will be deleted or metadata-changed below.
Path deleteDir=new Path(subsub,"deleteDir");
Path deleteFile=new Path(deleteDir,"deleteFile");
Path noChangeDirParent=new Path(sub,"noChangeDirParent");
Path noChangeDir=new Path(noChangeDirParent,"noChangeDir");
Path noChangeFile=new Path(noChangeDir,"noChangeFile");
DFSTestUtil.createFile(hdfs,deleteFile,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,noChangeFile,BLOCKSIZE,REPLICATION,seed);
Path metaChangeFile1=new Path(subsub,"metaChangeFile1");
DFSTestUtil.createFile(hdfs,metaChangeFile1,BLOCKSIZE,REPLICATION,seed);
Path metaChangeFile2=new Path(noChangeDir,"metaChangeFile2");
DFSTestUtil.createFile(hdfs,metaChangeFile2,BLOCKSIZE,REPLICATION,seed);
// Case 1: delete a dir/file before any snapshot exists.
hdfs.delete(deleteDir,true);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s0");
// Case 2: a file created and deleted after s0 was never captured by a
// snapshot, so its blocks must be released from the block manager.
Path tempDir=new Path(dir,"tempdir");
Path tempFile=new Path(tempDir,"tempfile");
DFSTestUtil.createFile(hdfs,tempFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile temp=TestSnapshotBlocksMap.assertBlockCollection(tempFile.toString(),1,fsdir,blockmanager);
BlockInfo[] blocks=temp.getBlocks();
hdfs.delete(tempDir,true);
checkQuotaUsageComputation(dir,9L,BLOCKSIZE * REPLICATION * 3);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// Change metadata (replication) and add a new file, then take snapshot s1.
Path newFileAfterS0=new Path(subsub,"newFile");
DFSTestUtil.createFile(hdfs,newFileAfterS0,BLOCKSIZE,REPLICATION,seed);
hdfs.setReplication(metaChangeFile1,REPLICATION_1);
hdfs.setReplication(metaChangeFile2,REPLICATION_1);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1");
checkQuotaUsageComputation(dir,14L,BLOCKSIZE * REPLICATION * 4);
Snapshot snapshot0=fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s0"));
Snapshot snapshot1=fsdir.getINode(dir.toString()).asDirectory().getSnapshot(DFSUtil.string2Bytes("s1"));
// Cases 3/4: delete a dir captured by snapshots; the snapshot copies of
// its children must survive with their pre-deletion metadata.
hdfs.delete(noChangeDirParent,true);
checkQuotaUsageComputation(dir,17L,BLOCKSIZE * REPLICATION * 4);
Path snapshotNoChangeDir=SnapshotTestHelper.getSnapshotPath(dir,"s1",sub.getName() + "/" + noChangeDirParent.getName()+ "/"+ noChangeDir.getName());
INodeDirectory snapshotNode=(INodeDirectory)fsdir.getINode(snapshotNoChangeDir.toString());
assertEquals(INodeDirectory.class,snapshotNode.getClass());
// FIX: use the parameterized list type; the raw ReadOnlyList does not
// compile against the INode-typed reads below (get() would return Object).
ReadOnlyList<INode> children=snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2,children.size());
INode noChangeFileSCopy=children.get(1);
assertEquals(noChangeFile.getName(),noChangeFileSCopy.getLocalName());
assertEquals(INodeFile.class,noChangeFileSCopy.getClass());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,noChangeFileSCopy.getLocalName()).toString(),1,fsdir,blockmanager);
// The meta-changed file's snapshot copy keeps per-snapshot replication.
INodeFile metaChangeFile2SCopy=children.get(0).asFile();
assertEquals(metaChangeFile2.getName(),metaChangeFile2SCopy.getLocalName());
assertTrue(metaChangeFile2SCopy.isWithSnapshot());
assertFalse(metaChangeFile2SCopy.isUnderConstruction());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,metaChangeFile2SCopy.getLocalName()).toString(),1,fsdir,blockmanager);
assertEquals(REPLICATION_1,metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
// A file created after s1 and then deleted with its ancestor releases its
// blocks, since no snapshot ever captured it.
Path newFile=new Path(sub,"newFile");
DFSTestUtil.createFile(hdfs,newFile,BLOCKSIZE,REPLICATION,seed);
final INodeFile newFileNode=TestSnapshotBlocksMap.assertBlockCollection(newFile.toString(),1,fsdir,blockmanager);
blocks=newFileNode.getBlocks();
checkQuotaUsageComputation(dir,18L,BLOCKSIZE * REPLICATION * 5);
hdfs.delete(sub,true);
checkQuotaUsageComputation(dir,19L,BLOCKSIZE * REPLICATION * 4);
for ( BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// The deleted subtree remains reachable via the s1 snapshot path, with
// correct child lists for the current state and for each snapshot id.
Path snapshotSub=SnapshotTestHelper.getSnapshotPath(dir,"s1",sub.getName());
INodeDirectory snapshotNode4Sub=fsdir.getINode(snapshotSub.toString()).asDirectory();
assertTrue(snapshotNode4Sub.isWithSnapshot());
assertEquals(1,snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
assertEquals(2,snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
INode snapshotNode4Subsub=snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
INodeDirectory snapshotSubsubDir=(INodeDirectory)snapshotNode4Subsub;
children=snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2,children.size());
assertEquals(children.get(0).getLocalName(),metaChangeFile1.getName());
assertEquals(children.get(1).getLocalName(),newFileAfterS0.getName());
children=snapshotSubsubDir.getChildrenList(snapshot0.getId());
assertEquals(1,children.size());
INode child=children.get(0);
assertEquals(child.getLocalName(),metaChangeFile1.getName());
// Per-snapshot replication of the meta-changed file is preserved too.
INodeFile metaChangeFile1SCopy=child.asFile();
assertTrue(metaChangeFile1SCopy.isWithSnapshot());
assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1,metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* A test covering the case where the snapshot diff to be deleted is renamed
* to its previous snapshot.
*/
@Test(timeout=300000) public void testRenameSnapshotDiff() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Setup: files under sub and subsub, plus an owner change captured by s0.
final Path subFile0=new Path(sub,"file0");
final Path subsubFile0=new Path(subsub,"file0");
DFSTestUtil.createFile(hdfs,subFile0,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,subsubFile0,BLOCKSIZE,REPLICATION,seed);
hdfs.setOwner(subsub,"owner","group");
SnapshotTestHelper.createSnapshot(hdfs,sub,"s0");
checkQuotaUsageComputation(sub,5,BLOCKSIZE * 6);
// More files, then nested snapshots s1 (on sub) and s2 (on the outer dir).
final Path subFile1=new Path(sub,"file1");
final Path subsubFile1=new Path(subsub,"file1");
DFSTestUtil.createFile(hdfs,subFile1,BLOCKSIZE,REPLICATION_1,seed);
DFSTestUtil.createFile(hdfs,subsubFile1,BLOCKSIZE,REPLICATION,seed);
checkQuotaUsageComputation(sub,8,BLOCKSIZE * 11);
SnapshotTestHelper.createSnapshot(hdfs,sub,"s1");
checkQuotaUsageComputation(sub,9,BLOCKSIZE * 11);
SnapshotTestHelper.createSnapshot(hdfs,dir,"s2");
checkQuotaUsageComputation(dir,11,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,9,BLOCKSIZE * 11);
// Changes after s2: owner change, replication change, and a deletion.
hdfs.setOwner(subsub,"unknown","unknown");
hdfs.setReplication(subsubFile1,REPLICATION_1);
checkQuotaUsageComputation(dir,13,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,11,BLOCKSIZE * 11);
hdfs.delete(subFile1,true);
checkQuotaUsageComputation(new Path("/"),16,BLOCKSIZE * 11);
checkQuotaUsageComputation(dir,15,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,13,BLOCKSIZE * 11);
// s2 still shows the pre-change owner/group and replication values.
Path subsubSnapshotCopy=SnapshotTestHelper.getSnapshotPath(dir,"s2",sub.getName() + Path.SEPARATOR + subsub.getName());
Path subsubFile1SCopy=SnapshotTestHelper.getSnapshotPath(dir,"s2",sub.getName() + Path.SEPARATOR + subsub.getName()+ Path.SEPARATOR+ subsubFile1.getName());
Path subFile1SCopy=SnapshotTestHelper.getSnapshotPath(dir,"s2",sub.getName() + Path.SEPARATOR + subFile1.getName());
FileStatus subsubStatus=hdfs.getFileStatus(subsubSnapshotCopy);
assertEquals("owner",subsubStatus.getOwner());
assertEquals("group",subsubStatus.getGroup());
FileStatus subsubFile1Status=hdfs.getFileStatus(subsubFile1SCopy);
assertEquals(REPLICATION,subsubFile1Status.getReplication());
FileStatus subFile1Status=hdfs.getFileStatus(subFile1SCopy);
assertEquals(REPLICATION_1,subFile1Status.getReplication());
// Delete s2: its diff should be renamed (merged) into the prior snapshot.
hdfs.deleteSnapshot(dir,"s2");
checkQuotaUsageComputation(new Path("/"),14,BLOCKSIZE * 11);
checkQuotaUsageComputation(dir,13,BLOCKSIZE * 11);
checkQuotaUsageComputation(sub,12,BLOCKSIZE * 11);
// The s2 paths are gone after the snapshot deletion.
try {
hdfs.getFileStatus(subsubSnapshotCopy);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + subsubSnapshotCopy.toString(),e);
}
try {
hdfs.getFileStatus(subsubFile1SCopy);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + subsubFile1SCopy.toString(),e);
}
try {
hdfs.getFileStatus(subFile1SCopy);
fail("should throw FileNotFoundException");
}
catch ( FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + subFile1SCopy.toString(),e);
}
// The same content remains reachable through the earlier s1 snapshot.
subsubSnapshotCopy=SnapshotTestHelper.getSnapshotPath(sub,"s1",subsub.getName());
subsubFile1SCopy=SnapshotTestHelper.getSnapshotPath(sub,"s1",subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
subFile1SCopy=SnapshotTestHelper.getSnapshotPath(sub,"s1",subFile1.getName());
subsubStatus=hdfs.getFileStatus(subsubSnapshotCopy);
assertEquals("owner",subsubStatus.getOwner());
assertEquals("group",subsubStatus.getGroup());
subsubFile1Status=hdfs.getFileStatus(subsubFile1SCopy);
assertEquals(REPLICATION,subsubFile1Status.getReplication());
subFile1Status=hdfs.getFileStatus(subFile1SCopy);
assertEquals(REPLICATION_1,subFile1Status.getReplication());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test the computation and representation of diff between snapshots
*/
@Test(timeout=60000) public void testDiffReport() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Build a nested snapshottable tree and take several snapshots with
// modifications between them (see modifyAndCreateSnapshot).
Path subsub1=new Path(sub1,"subsub1");
Path subsubsub1=new Path(subsub1,"subsubsub1");
hdfs.mkdirs(subsubsub1);
modifyAndCreateSnapshot(sub1,new Path[]{sub1,subsubsub1});
modifyAndCreateSnapshot(subsubsub1,new Path[]{sub1,subsubsub1});
// Diff report on a non-snapshottable directory must be rejected.
try {
hdfs.getSnapshotDiffReport(subsub1,"s1","s2");
fail("Expect exception when getting snapshot diff report: " + subsub1 + " is not a snapshottable directory.");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + subsub1,e);
}
// Diff report with an unknown snapshot name must be rejected.
final String invalidName="invalid";
try {
hdfs.getSnapshotDiffReport(sub1,invalidName,invalidName);
fail("Expect exception when providing invalid snapshot name for diff report");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Cannot find the snapshot of directory " + sub1 + " with name "+ invalidName,e);
}
// Diffing a snapshot against itself (or "" against "") is empty.
SnapshotDiffReport report=hdfs.getSnapshotDiffReport(sub1,"s0","s0");
System.out.println(report);
assertEquals(0,report.getDiffList().size());
report=hdfs.getSnapshotDiffReport(sub1,"","");
System.out.println(report);
assertEquals(0,report.getDiffList().size());
// No changes were made to subsubsub1 between its s0 and s2.
report=hdfs.getSnapshotDiffReport(subsubsub1,"s0","s2");
System.out.println(report);
assertEquals(0,report.getDiffList().size());
// A fully-qualified path must work the same as a relative one.
report=hdfs.getSnapshotDiffReport(hdfs.makeQualified(subsubsub1),"s0","s2");
System.out.println(report);
assertEquals(0,report.getDiffList().size());
// Expected entries for each snapshot pair, including the nested subtree
// and the "" (until-current-state) end point.
verifyDiffReport(sub1,"s0","s2",new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("file15")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("file12")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("file11")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("file11")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("file13")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("link13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("link13")));
verifyDiffReport(sub1,"s0","s5",new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("file15")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("file12")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("file10")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("file11")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("file11")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("file13")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("link13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("link13")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("subsub1/subsubsub1")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
verifyDiffReport(sub1,"s2","s5",new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("file10")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("subsub1/subsubsub1")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
verifyDiffReport(sub1,"s3","",new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("subsub1/subsubsub1")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),new DiffReportEntry(DiffType.MODIFY,DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),new DiffReportEntry(DiffType.CREATE,DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),new DiffReportEntry(DiffType.DELETE,DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
}
APIUtilityVerifier InternalCallVerifier ConditionMatcher PublicFieldVerifier
/**
* Test that we cannot read a file beyond its snapshot length
* when accessing it via a snapshot path.
*/
@Test(timeout=300000) public void testSnapshotfileLength() throws Exception {
hdfs.mkdirs(sub);
int bytesRead;
byte[] buffer=new byte[BLOCKSIZE * 8];
FileStatus fileStatus=null;
// Write one block, snapshot, then append a second block.
Path file1=new Path(sub,file1Name);
DFSTestUtil.createFile(hdfs,file1,0,REPLICATION,SEED);
DFSTestUtil.appendFile(hdfs,file1,BLOCKSIZE);
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub,snapshot1);
DFSTestUtil.appendFile(hdfs,file1,BLOCKSIZE);
// The live file exposes both blocks.
fileStatus=hdfs.getFileStatus(file1);
assertThat(fileStatus.getLen(),is((long)BLOCKSIZE * 2));
// FIX: try-with-resources so the stream is closed even when an assertion
// fails mid-block (the original leaked fis on failure).
try (FSDataInputStream fis=hdfs.open(file1)) {
bytesRead=fis.read(0,buffer,0,buffer.length);
assertThat(bytesRead,is(BLOCKSIZE * 2));
}
// Via the snapshot path only the pre-snapshot length may be read.
Path file1snap1=SnapshotTestHelper.getSnapshotPath(sub,snapshot1,file1Name);
try (FSDataInputStream fis=hdfs.open(file1snap1)) {
fileStatus=hdfs.getFileStatus(file1snap1);
assertThat(fileStatus.getLen(),is((long)BLOCKSIZE));
bytesRead=fis.read(0,buffer,0,buffer.length);
assertThat(bytesRead,is(BLOCKSIZE));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
* Adding as part of jira HDFS-5343
* Test for checking the cat command on snapshot path it
* cannot read a file beyond snapshot file length
* @throws Exception
*/
@Test(timeout=600000) public void testSnapshotFileLengthWithCatCommand() throws Exception {
FSDataInputStream fis=null;
FileStatus fileStatus=null;
int bytesRead;
byte[] buffer=new byte[BLOCKSIZE * 8];
// Write one block, snapshot, append a second block.
hdfs.mkdirs(sub);
Path file1=new Path(sub,file1Name);
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,SEED);
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub,snapshot1);
DFSTestUtil.appendFile(hdfs,file1,BLOCKSIZE);
// The live file exposes both blocks.
fileStatus=hdfs.getFileStatus(file1);
assertEquals("Unexpected file length",BLOCKSIZE * 2,fileStatus.getLen());
fis=hdfs.open(file1);
bytesRead=fis.read(buffer,0,buffer.length);
assertEquals("Unexpected # bytes read",BLOCKSIZE * 2,bytesRead);
fis.close();
// Via the snapshot path only the pre-append length is visible.
Path file1snap1=SnapshotTestHelper.getSnapshotPath(sub,snapshot1,file1Name);
fis=hdfs.open(file1snap1);
fileStatus=hdfs.getFileStatus(file1snap1);
// FIX: expected value comes first in assertEquals (was swapped).
assertEquals("Unexpected snapshot file length",BLOCKSIZE,fileStatus.getLen());
bytesRead=fis.read(buffer,0,buffer.length);
assertEquals("Unexpected # bytes read",BLOCKSIZE,bytesRead);
fis.close();
// "-cat" on the snapshot path must also stop at the snapshot length.
PrintStream outBackup=System.out;
PrintStream errBackup=System.err;
ByteArrayOutputStream bao=new ByteArrayOutputStream();
System.setOut(new PrintStream(bao));
System.setErr(new PrintStream(bao));
FsShell shell=new FsShell();
try {
ToolRunner.run(conf,shell,new String[]{"-cat","/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1"});
assertEquals("Unexpected # bytes from -cat",BLOCKSIZE,bao.size());
}
 finally {
// Restore the real streams regardless of the assertion outcome.
System.setOut(outBackup);
System.setErr(errBackup);
}
}
IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test listing snapshots under a snapshottable directory
*/
@Test(timeout=15000) public void testListSnapshots() throws Exception {
final Path snapshotsPath=new Path(dir,".snapshot");
// The root-level ".snapshot" listing starts out empty.
FileStatus[] stats=hdfs.listStatus(new Path("/.snapshot"));
assertEquals(0,stats.length);
// Before allowSnapshot, listing dir/.snapshot must be rejected.
try {
stats=hdfs.listStatus(snapshotsPath);
fail("expect SnapshotException");
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + dir.toString(),e);
}
hdfs.allowSnapshot(dir);
stats=hdfs.listStatus(snapshotsPath);
assertEquals(0,stats.length);
final int snapshotNum=5;
// Create snapshots s_0..s_4 one at a time; the listing grows in lockstep
// and stays sorted by name.
for (int created=1; created <= snapshotNum; created++) {
hdfs.createSnapshot(dir,"s_" + (created - 1));
stats=hdfs.listStatus(snapshotsPath);
assertEquals(created,stats.length);
for (int i=0; i < created; i++) {
assertEquals("s_" + i,stats[i].getPath().getName());
}
}
// Delete from the newest (s_4) down to s_1; the listing shrinks in lockstep.
for (int remaining=snapshotNum - 1; remaining > 0; remaining--) {
hdfs.deleteSnapshot(dir,"s_" + remaining);
stats=hdfs.listStatus(snapshotsPath);
assertEquals(remaining,stats.length);
for (int i=0; i < remaining; i++) {
assertEquals("s_" + i,stats[i].getPath().getName());
}
}
// Remove the final snapshot; the listing is empty again.
hdfs.deleteSnapshot(dir,"s_0");
stats=hdfs.listStatus(snapshotsPath);
assertEquals(0,stats.length);
}
InternalCallVerifier EqualityVerifier
/**
* Test the metric SnapshottableDirectories, AllowSnapshotOps,
* DisallowSnapshotOps, and listSnapshottableDirOps
*/
@Test public void testSnapshottableDirs() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Baseline: no snapshottable dirs, no allow/disallow ops recorded yet.
assertGauge("SnapshottableDirectories",0,getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps",0L,getMetrics(NN_METRICS));
assertCounter("DisallowSnapshotOps",0L,getMetrics(NN_METRICS));
// Each allowSnapshot bumps both the gauge and the op counter.
hdfs.allowSnapshot(sub1);
assertGauge("SnapshottableDirectories",1,getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps",1L,getMetrics(NN_METRICS));
Path sub2=new Path(dir,"sub2");
Path file=new Path(sub2,"file");
DFSTestUtil.createFile(hdfs,file,1024,REPLICATION,seed);
hdfs.allowSnapshot(sub2);
assertGauge("SnapshottableDirectories",2,getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps",2L,getMetrics(NN_METRICS));
// Nested snapshottable dir is allowed (nested snapshots enabled above).
Path subsub1=new Path(sub1,"sub1sub1");
Path subfile=new Path(subsub1,"file");
DFSTestUtil.createFile(hdfs,subfile,1024,REPLICATION,seed);
hdfs.allowSnapshot(subsub1);
assertGauge("SnapshottableDirectories",3,getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps",3L,getMetrics(NN_METRICS));
// Re-allowing an already snapshottable dir counts the op but not the gauge.
hdfs.allowSnapshot(sub1);
assertGauge("SnapshottableDirectories",3,getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps",4L,getMetrics(NN_METRICS));
// disallowSnapshot decrements the gauge and bumps its own counter.
hdfs.disallowSnapshot(sub1);
assertGauge("SnapshottableDirectories",2,getMetrics(NS_METRICS));
assertCounter("DisallowSnapshotOps",1L,getMetrics(NN_METRICS));
// Deleting a snapshottable dir (no snapshots) also decrements the gauge.
hdfs.delete(subsub1,true);
assertGauge("SnapshottableDirectories",1,getMetrics(NS_METRICS));
// Listing bumps ListSnapshottableDirOps.
SnapshottableDirectoryStatus[] status=hdfs.getSnapshottableDirListing();
assertEquals(1,status.length);
assertCounter("ListSnapshottableDirOps",1L,getMetrics(NN_METRICS));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test FileStatus of snapshot file before/after rename
*/
@Test(timeout=60000) public void testSnapshotRename() throws Exception {
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
// Take snapshot "s1" and capture the file's status under it.
final Path s1Root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s1");
Path fileInS1=new Path(s1Root,file1.getName());
assertTrue(hdfs.exists(fileInS1));
final FileStatus beforeStatus=hdfs.getFileStatus(fileInS1);
// Rename the snapshot: the old path disappears and the new one appears.
hdfs.renameSnapshot(sub1,"s1","s2");
assertFalse(hdfs.exists(fileInS1));
final Path s2Root=SnapshotTestHelper.getSnapshotRoot(sub1,"s2");
final Path fileInS2=new Path(s2Root,file1.getName());
assertTrue(hdfs.exists(fileInS2));
final FileStatus afterStatus=hdfs.getFileStatus(fileInS2);
// The two statuses differ only by path: once the path is patched over,
// their string forms match exactly.
assertFalse(beforeStatus.equals(afterStatus));
beforeStatus.setPath(afterStatus.getPath());
assertEquals(beforeStatus.toString(),afterStatus.toString());
}
InternalCallVerifier BooleanVerifier
@Test public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
// Remember the real streams so they can be restored afterwards; replacing
// System.out/err without restoring them pollutes every later test (the
// cat-command test in this file does restore them).
PrintStream oldOut=System.out;
PrintStream oldErr=System.err;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream psOut=new PrintStream(out);
System.setOut(psOut);
System.setErr(psOut);
try {
FsShell shell=new FsShell();
shell.setConf(conf);
// Too few arguments: -renameSnapshot needs <dir> <oldName> <newName>.
String[] argv1={"-renameSnapshot","/tmp","s1"};
int val=shell.run(argv1);
assertEquals(-1,val);
assertTrue(out.toString().contains(argv1[0] + ": Incorrect number of arguments."));
out.reset();
// Too many arguments.
String[] argv2={"-renameSnapshot","/tmp","s1","s2","s3"};
val=shell.run(argv2);
assertEquals(-1,val);
assertTrue(out.toString().contains(argv2[0] + ": Incorrect number of arguments."));
}
 finally {
// Restore the original streams even if an assertion above failed.
System.setOut(oldOut);
System.setErr(oldErr);
psOut.close();
out.close();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Test replication number calculation for a file with snapshots.
*/
@Test(timeout=60000) public void testReplicationWithSnapshot() throws Exception {
short fileRep=1;
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,fileRep,seed);
// FIX: parameterize the map (was a raw Map/HashMap).
// Maps each snapshot file path to the replication it was snapshotted with.
Map<Path,Short> snapshotRepMap=new HashMap<Path,Short>();
// Repeatedly snapshot, then raise the live file's replication; every
// earlier snapshot must keep reporting its own replication value.
while (fileRep < NUMDATANODE) {
Path snapshotRoot=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + fileRep);
Path snapshot=new Path(snapshotRoot,file1.getName());
assertEquals(fileRep,getINodeFile(snapshot).getFileReplication());
snapshotRepMap.put(snapshot,fileRep);
hdfs.setReplication(file1,++fileRep);
checkFileReplication(file1,fileRep,fileRep);
checkSnapshotFileReplication(file1,snapshotRepMap,fileRep);
}
// Lowering the live replication leaves snapshot values untouched; the
// real block replication stays at the max over all snapshots.
hdfs.setReplication(file1,REPLICATION);
checkFileReplication(file1,REPLICATION,(short)(NUMDATANODE - 1));
checkSnapshotFileReplication(file1,snapshotRepMap,(short)(NUMDATANODE - 1));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test replication for a file with snapshots, also including the scenario
* where the original file is deleted
*/
@Test(timeout=60000) public void testReplicationAfterDeletion() throws Exception {
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
// FIX: parameterize the map; the raw Map does not compile against the
// typed reads below (keySet() iterated as Path, get(ss).shortValue()).
Map<Path,Short> snapshotRepMap=new HashMap<Path,Short>();
// Take three snapshots, all at replication REPLICATION.
for (int i=1; i <= 3; i++) {
Path root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + i);
Path ssFile=new Path(root,file1.getName());
snapshotRepMap.put(ssFile,REPLICATION);
}
checkFileReplication(file1,REPLICATION,REPLICATION);
checkSnapshotFileReplication(file1,snapshotRepMap,REPLICATION);
// After the original file is deleted, every snapshot copy must retain
// both its block replication and its recorded file replication.
hdfs.delete(file1,true);
for ( Path ss : snapshotRepMap.keySet()) {
final INodeFile ssInode=getINodeFile(ss);
assertEquals(REPLICATION,ssInode.getBlockReplication());
assertEquals(snapshotRepMap.get(ss).shortValue(),ssInode.getFileReplication());
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test getting SnapshotStatsMXBean information
*/
@Test public void testSnapshotStatsMXBeanInfo() throws Exception {
// Spin up a private mini-cluster, create one snapshottable dir with one
// snapshot, and verify the SnapshotInfo MXBean mirrors the SnapshotManager.
Configuration conf=new Configuration();
MiniDFSCluster cluster=null;
String pathName="/snapshot";
Path path=new Path(pathName);
try {
cluster=new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
SnapshotManager sm=cluster.getNamesystem().getSnapshotManager();
DistributedFileSystem dfs=(DistributedFileSystem)cluster.getFileSystem();
dfs.mkdirs(path);
dfs.allowSnapshot(path);
dfs.createSnapshot(path);
// Read the bean through the platform MBean server, as a JMX client would.
MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
CompositeData[] directories=(CompositeData[])mbs.getAttribute(mxbeanName,"SnapshottableDirectories");
int numDirectories=Array.getLength(directories);
assertEquals(sm.getNumSnapshottableDirs(),numDirectories);
CompositeData[] snapshots=(CompositeData[])mbs.getAttribute(mxbeanName,"Snapshots");
int numSnapshots=Array.getLength(snapshots);
assertEquals(sm.getNumSnapshots(),numSnapshots);
// Spot-check that the first entries reference the path we created.
CompositeData d=(CompositeData)Array.get(directories,0);
CompositeData s=(CompositeData)Array.get(snapshots,0);
assertTrue(((String)d.get("path")).contains(pathName));
assertTrue(((String)s.get("snapshotDirectory")).contains(pathName));
}
finally {
// Always tear down the mini-cluster, even on assertion failure.
if (cluster != null) {
cluster.shutdown();
}
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Test listing all the snapshottable directories
*/
@Test(timeout=60000) public void testListSnapshottableDir() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Initially no snapshottable dirs: the listing is null.
SnapshottableDirectoryStatus[] dirs=hdfs.getSnapshottableDirListing();
assertNull(dirs);
// The root itself can be made snapshottable; its local name is "".
final Path root=new Path("/");
hdfs.allowSnapshot(root);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(1,dirs.length);
assertEquals("",dirs[0].getDirStatus().getLocalName());
assertEquals(root,dirs[0].getFullPath());
hdfs.disallowSnapshot(root);
dirs=hdfs.getSnapshottableDirListing();
assertNull(dirs);
// Allowing dir1 lists it with a zero snapshot count.
hdfs.allowSnapshot(dir1);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(1,dirs.length);
assertEquals(dir1.getName(),dirs[0].getDirStatus().getLocalName());
assertEquals(dir1,dirs[0].getFullPath());
assertEquals(0,dirs[0].getSnapshotNumber());
// Adding dir2 yields a two-entry, path-ordered listing.
hdfs.allowSnapshot(dir2);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(2,dirs.length);
assertEquals(dir1.getName(),dirs[0].getDirStatus().getLocalName());
assertEquals(dir1,dirs[0].getFullPath());
assertEquals(dir2.getName(),dirs[1].getDirStatus().getLocalName());
assertEquals(dir2,dirs[1].getFullPath());
assertEquals(0,dirs[1].getSnapshotNumber());
// Rename-with-overwrite replaces dir2 (no snapshots) with a plain dir,
// so it drops out of the listing.
final Path dir3=new Path("/TestSnapshot3");
hdfs.mkdirs(dir3);
hdfs.rename(dir3,dir2,Rename.OVERWRITE);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(1,dirs.length);
assertEquals(dir1,dirs[0].getFullPath());
// Re-allow dir2 and take two snapshots; the count is reported.
hdfs.allowSnapshot(dir2);
hdfs.createSnapshot(dir2,"s1");
hdfs.createSnapshot(dir2,"s2");
dirs=hdfs.getSnapshottableDirListing();
assertEquals(dir2,dirs[1].getFullPath());
assertEquals(2,dirs[1].getSnapshotNumber());
// Nested snapshottable dirs under dir1 appear in the listing too.
Path sub1=new Path(dir1,"sub1");
Path file1=new Path(sub1,"file1");
Path sub2=new Path(dir1,"sub2");
Path file2=new Path(sub2,"file2");
DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
DFSTestUtil.createFile(hdfs,file2,BLOCKSIZE,REPLICATION,seed);
hdfs.allowSnapshot(sub1);
hdfs.allowSnapshot(sub2);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(4,dirs.length);
assertEquals(dir1,dirs[0].getFullPath());
assertEquals(dir2,dirs[1].getFullPath());
assertEquals(sub1,dirs[2].getFullPath());
assertEquals(sub2,dirs[3].getFullPath());
// Disallowing removes exactly that entry.
hdfs.disallowSnapshot(sub1);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(3,dirs.length);
assertEquals(dir1,dirs[0].getFullPath());
assertEquals(dir2,dirs[1].getFullPath());
assertEquals(sub2,dirs[2].getFullPath());
// Deleting dir1 (no snapshots on it or sub2) removes both remaining
// entries under it, leaving only dir2.
hdfs.delete(dir1,true);
dirs=hdfs.getSnapshottableDirListing();
assertEquals(1,dirs.length);
assertEquals(dir2.getName(),dirs[0].getDirStatus().getLocalName());
assertEquals(dir2,dirs[0].getFullPath());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test the listing with different user names to make sure only directories
* that are owned by the user are listed.
*/
@Test(timeout=60000) public void testListWithDifferentUser() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// Two snapshottable dirs owned by the test (super) user.
hdfs.allowSnapshot(dir1);
hdfs.allowSnapshot(dir2);
// Open up the root so the test users can create their own dirs.
hdfs.setPermission(root,FsPermission.valueOf("-rwxrwxrwx"));
// user1 owns two snapshottable dirs.
UserGroupInformation ugi1=UserGroupInformation.createUserForTesting("user1",new String[]{"group1"});
DistributedFileSystem fs1=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(ugi1,conf);
Path dir1_user1=new Path("/dir1_user1");
Path dir2_user1=new Path("/dir2_user1");
fs1.mkdirs(dir1_user1);
fs1.mkdirs(dir2_user1);
hdfs.allowSnapshot(dir1_user1);
hdfs.allowSnapshot(dir2_user1);
// user2 owns two snapshottable dirs (one nested in the other).
UserGroupInformation ugi2=UserGroupInformation.createUserForTesting("user2",new String[]{"group2"});
DistributedFileSystem fs2=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(ugi2,conf);
Path dir_user2=new Path("/dir_user2");
Path subdir_user2=new Path(dir_user2,"subdir");
fs2.mkdirs(dir_user2);
fs2.mkdirs(subdir_user2);
hdfs.allowSnapshot(dir_user2);
hdfs.allowSnapshot(subdir_user2);
// A superuser sees all six snapshottable dirs.
String supergroup=conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
UserGroupInformation superUgi=UserGroupInformation.createUserForTesting("superuser",new String[]{supergroup});
DistributedFileSystem fs3=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(superUgi,conf);
SnapshottableDirectoryStatus[] dirs=fs3.getSnapshottableDirListing();
assertEquals(6,dirs.length);
// Each regular user sees only the dirs they own.
dirs=fs1.getSnapshottableDirListing();
assertEquals(2,dirs.length);
assertEquals(dir1_user1,dirs[0].getFullPath());
assertEquals(dir2_user1,dirs[1].getFullPath());
dirs=fs2.getSnapshottableDirListing();
assertEquals(2,dirs.length);
assertEquals(dir_user2,dirs[0].getFullPath());
assertEquals(subdir_user2,dirs[1].getFullPath());
}
InternalCallVerifier EqualityVerifier
/**
 * 1) Save xattrs, then create snapshot. Assert that inode of original and
 * snapshot have same xattrs. 2) Change the original xattrs, assert snapshot
 * still has old xattrs.
 */
@Test public void testXAttrForSnapshotRootAfterChange() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // Both the live directory and the snapshot carry the two xattrs.
  Map attrs = hdfs.getXAttrs(path);
  Assert.assertEquals(attrs.size(), 2);
  Assert.assertArrayEquals(value1, attrs.get(name1));
  Assert.assertArrayEquals(value2, attrs.get(name2));

  attrs = hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(attrs.size(), 2);
  Assert.assertArrayEquals(value1, attrs.get(name1));
  Assert.assertArrayEquals(value2, attrs.get(name2));

  // Change a live xattr; the snapshot must keep the old value, and the
  // distinction must survive both kinds of restart.
  hdfs.setXAttr(path, name1, newValue1);
  doSnapshotRootChangeAssertions(path, snapshotPath);
  restart(false);
  doSnapshotRootChangeAssertions(path, snapshotPath);
  restart(true);
  doSnapshotRootChangeAssertions(path, snapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * Tests modifying xattrs on a directory that has been snapshotted
 */
@Test(timeout=120000) public void testModifyReadsCurrentState() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // xattrs are added only after the snapshot was taken.
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  Map current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 2);
  assertArrayEquals(value1, current.get(name1));
  assertArrayEquals(value2, current.get(name2));

  // The snapshot predates the xattrs, so it must show none.
  current = hdfs.getXAttrs(snapshotPath);
  assertEquals(current.size(), 0);

  // REPLACE operates on the live state, not the snapshot state.
  hdfs.setXAttr(path, name1, value2, EnumSet.of(XAttrSetFlag.REPLACE));
  current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 2);
  assertArrayEquals(value2, current.get(name1));
  assertArrayEquals(value2, current.get(name2));

  hdfs.setXAttr(path, name2, value1, EnumSet.of(XAttrSetFlag.REPLACE));
  current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 2);
  assertArrayEquals(value2, current.get(name1));
  assertArrayEquals(value1, current.get(name2));

  // The snapshot is still untouched after the replacements.
  current = hdfs.getXAttrs(snapshotPath);
  assertEquals(current.size(), 0);

  // Removal also acts on the live state only.
  hdfs.removeXAttr(path, name1);
  hdfs.removeXAttr(path, name2);
  current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 0);
}
InternalCallVerifier EqualityVerifier
/**
 * 1) Save xattrs, then create snapshot. Assert that inode of original and
 * snapshot have same xattrs. 2) Remove some original xattrs, assert snapshot
 * still has old xattrs.
 */
@Test public void testXAttrForSnapshotRootAfterRemove() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // Both the live directory and the snapshot see the two xattrs.
  Map attrs = hdfs.getXAttrs(path);
  Assert.assertEquals(attrs.size(), 2);
  Assert.assertArrayEquals(value1, attrs.get(name1));
  Assert.assertArrayEquals(value2, attrs.get(name2));

  attrs = hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(attrs.size(), 2);
  Assert.assertArrayEquals(value1, attrs.get(name1));
  Assert.assertArrayEquals(value2, attrs.get(name2));

  // Remove both from the live directory; the snapshot must be unaffected,
  // including across both kinds of restart.
  hdfs.removeXAttr(path, name1);
  hdfs.removeXAttr(path, name2);
  doSnapshotRootRemovalAssertions(path, snapshotPath);
  restart(false);
  doSnapshotRootRemovalAssertions(path, snapshotPath);
  restart(true);
  doSnapshotRootRemovalAssertions(path, snapshotPath);
}
InternalCallVerifier EqualityVerifier
/**
 * Assert exception of setting xattr when exceeding quota.
 */
@Test public void testSetXAttrExceedsQuota() throws Exception {
  Path file = new Path(path, "file1");
  Path fileInSnapshot = new Path(snapshotPath, "file1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
  hdfs.allowSnapshot(path);
  // Set a tight namespace quota so the final setXAttr has no headroom.
  hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs, file, FsPermission.createImmutable((short) 0600)).close();
  hdfs.setXAttr(file, name1, value1);
  hdfs.createSnapshot(path, snapshotName);

  // The xattr is visible through both the live file and the snapshot.
  byte[] stored = hdfs.getXAttr(file, name1);
  Assert.assertArrayEquals(stored, value1);
  stored = hdfs.getXAttr(fileInSnapshot, name1);
  Assert.assertArrayEquals(stored, value1);

  // The next xattr mutation is expected to trip the namespace quota.
  exception.expect(NSQuotaExceededException.class);
  hdfs.setXAttr(file, name2, value2);
}
InternalCallVerifier EqualityVerifier
/**
 * Tests removing xattrs on a directory that has been snapshotted
 */
@Test(timeout=120000) public void testRemoveReadsCurrentState() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // xattrs are added only after the snapshot was taken.
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  Map current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 2);
  assertArrayEquals(value1, current.get(name1));
  assertArrayEquals(value2, current.get(name2));

  // The snapshot predates the xattrs, so it must show none.
  current = hdfs.getXAttrs(snapshotPath);
  assertEquals(current.size(), 0);

  // Removing one xattr from the live state leaves the other in place.
  hdfs.removeXAttr(path, name2);
  current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 1);
  assertArrayEquals(value1, current.get(name1));

  // Removing the last xattr empties the live state.
  hdfs.removeXAttr(path, name1);
  current = hdfs.getXAttrs(path);
  assertEquals(current.size(), 0);
}
InternalCallVerifier EqualityVerifier
/**
 * Test successive snapshots in between modifications of XAttrs.
 * Also verify that snapshot XAttrs are not altered when a
 * snapshot is deleted.
 */
@Test public void testSuccessiveSnapshotXAttrChanges() throws Exception {
// Snapshot 1: the directory carries only name1=value1.
FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
hdfs.setXAttr(path,name1,value1);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
Map xattrs=hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(1,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
// Snapshot 2: name1 changed to newValue1 and name2=value2 added.
hdfs.setXAttr(path,name1,newValue1);
hdfs.setXAttr(path,name2,value2);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName2);
xattrs=hdfs.getXAttrs(snapshotPath2);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(newValue1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
// Snapshot 3: name1 restored to value1 and name2 removed again.
hdfs.setXAttr(path,name1,value1);
hdfs.removeXAttr(path,name2);
SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName3);
xattrs=hdfs.getXAttrs(snapshotPath3);
Assert.assertEquals(1,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
// Earlier snapshots still report exactly the xattrs they were taken with.
xattrs=hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(1,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
xattrs=hdfs.getXAttrs(snapshotPath2);
Assert.assertEquals(2,xattrs.size());
Assert.assertArrayEquals(newValue1,xattrs.get(name1));
Assert.assertArrayEquals(value2,xattrs.get(name2));
// Deleting the middle snapshot must not disturb its neighbors' xattrs.
hdfs.deleteSnapshot(path,snapshotName2);
xattrs=hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(1,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
xattrs=hdfs.getXAttrs(snapshotPath3);
Assert.assertEquals(1,xattrs.size());
Assert.assertArrayEquals(value1,xattrs.get(name1));
// Clean up the remaining snapshots.
hdfs.deleteSnapshot(path,snapshotName);
hdfs.deleteSnapshot(path,snapshotName3);
}
InternalCallVerifier EqualityVerifier
/**
 * Assert exception of removing xattr when exceeding quota.
 */
@Test public void testRemoveXAttrExceedsQuota() throws Exception {
  Path file = new Path(path, "file1");
  Path fileInSnapshot = new Path(snapshotPath, "file1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0755));
  hdfs.allowSnapshot(path);
  // Set a tight namespace quota so the final removeXAttr has no headroom.
  hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs, file, FsPermission.createImmutable((short) 0600)).close();
  hdfs.setXAttr(file, name1, value1);
  hdfs.createSnapshot(path, snapshotName);

  // The xattr is visible through both the live file and the snapshot.
  byte[] stored = hdfs.getXAttr(file, name1);
  Assert.assertArrayEquals(stored, value1);
  stored = hdfs.getXAttr(fileInSnapshot, name1);
  Assert.assertArrayEquals(stored, value1);

  // The next xattr mutation is expected to trip the namespace quota.
  exception.expect(NSQuotaExceededException.class);
  hdfs.removeXAttr(file, name1);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that users can copy a snapshot while preserving its xattrs.
 */
@Test(timeout=120000) public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
  hdfs.setXAttr(path, name1, value1);
  hdfs.setXAttr(path, name2, value2);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // "cp -px" should carry the snapshot's xattrs over to the copy.
  Path copyTarget = new Path(path.toString() + "-copy");
  String[] shellArgs = {"-cp", "-px",
      snapshotPath.toUri().toString(), copyTarget.toUri().toString()};
  int exitCode = ToolRunner.run(new FsShell(conf), shellArgs);
  assertEquals("cp -px is not working on a snapshot", SUCCESS, exitCode);

  Map copiedAttrs = hdfs.getXAttrs(copyTarget);
  assertArrayEquals(value1, copiedAttrs.get(name1));
  assertArrayEquals(value2, copiedAttrs.get(name2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Drives StartupProgress concurrently from 100 threads. Each thread works on
// one of four (phase, step, file, size, total) combinations (index i % 4 of
// the parallel arrays below), so 25 threads hammer each combination. The
// final view must hold one consistent value per combination, not torn state.
@Test(timeout=10000) public void testThreadSafety() throws Exception {
int numThreads=100;
// Parallel arrays: thread i uses entry i % 4 of each.
Phase[] phases={LOADING_FSIMAGE,LOADING_FSIMAGE,LOADING_EDITS,LOADING_EDITS};
Step[] steps=new Step[]{new Step(INODES),new Step(DELEGATION_KEYS),new Step(INODES),new Step(DELEGATION_KEYS)};
String[] files={"file1","file1","file2","file2"};
long[] sizes={1000L,1000L,2000L,2000L};
long[] totals={10000L,20000L,30000L,40000L};
ExecutorService exec=Executors.newFixedThreadPool(numThreads);
try {
for (int i=0; i < numThreads; ++i) {
final Phase phase=phases[i % phases.length];
final Step step=steps[i % steps.length];
final String file=files[i % files.length];
final long size=sizes[i % sizes.length];
final long total=totals[i % totals.length];
// Each task runs a full begin/set/increment/end cycle for its combination.
exec.submit(new Callable(){
@Override public Void call(){
startupProgress.beginPhase(phase);
startupProgress.setFile(phase,file);
startupProgress.setSize(phase,size);
startupProgress.setTotal(phase,step,total);
incrementCounter(startupProgress,phase,step,100L);
startupProgress.endStep(phase,step);
startupProgress.endPhase(phase);
return null;
}
}
);
}
}
finally {
// Wait for all 100 tasks before inspecting the aggregate state.
exec.shutdown();
assertTrue(exec.awaitTermination(10000L,TimeUnit.MILLISECONDS));
}
StartupProgressView view=startupProgress.createView();
assertNotNull(view);
// Expected counts: 25 threads per (phase, step) combination, each
// incrementing by 100 => 2500 per step.
assertEquals("file1",view.getFile(LOADING_FSIMAGE));
assertEquals(1000L,view.getSize(LOADING_FSIMAGE));
assertEquals(10000L,view.getTotal(LOADING_FSIMAGE,new Step(INODES)));
assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(INODES)));
assertEquals(20000L,view.getTotal(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
assertEquals("file2",view.getFile(LOADING_EDITS));
assertEquals(2000L,view.getSize(LOADING_EDITS));
assertEquals(30000L,view.getTotal(LOADING_EDITS,new Step(INODES)));
assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(INODES)));
assertEquals(40000L,view.getTotal(LOADING_EDITS,new Step(DELEGATION_KEYS)));
assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(DELEGATION_KEYS)));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Counter increments must be reported per (phase, step) by a view, a view
 * must be an immutable snapshot, and a fresh view must observe later
 * increments.
 */
@Test(timeout=10000) public void testCounter(){
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step inodesStep = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, inodesStep);
  incrementCounter(startupProgress, LOADING_FSIMAGE, inodesStep, 100L);
  startupProgress.endStep(LOADING_FSIMAGE, inodesStep);

  Step delegationKeysStep = new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE, delegationKeysStep);
  incrementCounter(startupProgress, LOADING_FSIMAGE, delegationKeysStep, 200L);
  startupProgress.endStep(LOADING_FSIMAGE, delegationKeysStep);
  startupProgress.endPhase(LOADING_FSIMAGE);

  startupProgress.beginPhase(LOADING_EDITS);
  Step editsFileStep = new Step("file", 1000L);
  startupProgress.beginStep(LOADING_EDITS, editsFileStep);
  incrementCounter(startupProgress, LOADING_EDITS, editsFileStep, 5000L);

  StartupProgressView firstView = startupProgress.createView();
  assertNotNull(firstView);
  assertEquals(100L, firstView.getCount(LOADING_FSIMAGE, inodesStep));
  assertEquals(200L, firstView.getCount(LOADING_FSIMAGE, delegationKeysStep));
  assertEquals(5000L, firstView.getCount(LOADING_EDITS, editsFileStep));
  // A step that never ran reports zero.
  assertEquals(0L, firstView.getCount(SAVING_CHECKPOINT, new Step(INODES)));

  // Mutations made after the view was created must not leak into it...
  incrementCounter(startupProgress, LOADING_EDITS, editsFileStep, 1000L);
  startupProgress.endStep(LOADING_EDITS, editsFileStep);
  startupProgress.endPhase(LOADING_EDITS);
  assertEquals(5000L, firstView.getCount(LOADING_EDITS, editsFileStep));

  // ...but a freshly created view observes them.
  StartupProgressView secondView = startupProgress.createView();
  assertNotNull(secondView);
  assertEquals(6000L, secondView.getCount(LOADING_EDITS, editsFileStep));
}
InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Steps begun in random order within a phase must be reported by the view
 * in the expected (sorted) order.
 */
@Test(timeout=10000) public void testStepSequence(){
  Step[] expectedSteps = new Step[]{
      new Step(INODES, "file1"), new Step(DELEGATION_KEYS, "file1"),
      new Step(INODES, "file2"), new Step(DELEGATION_KEYS, "file2"),
      new Step(INODES, "file3"), new Step(DELEGATION_KEYS, "file3")};
  // Begin the steps in shuffled order. Note: the raw List used previously
  // would not compile with the typed enhanced-for below; the element type
  // is Step, inferable from Arrays.asList(expectedSteps).
  List<Step> shuffledSteps = new ArrayList<Step>(Arrays.asList(expectedSteps));
  Collections.shuffle(shuffledSteps);
  startupProgress.beginPhase(SAVING_CHECKPOINT);
  for (Step step : shuffledSteps) {
    startupProgress.beginStep(SAVING_CHECKPOINT, step);
  }
  // The view must still enumerate the steps in the expected order.
  List<Step> actualSteps = new ArrayList<Step>(expectedSteps.length);
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  for (Step step : view.getSteps(SAVING_CHECKPOINT)) {
    actualSteps.add(step);
  }
  assertArrayEquals(expectedSteps, actualSteps.toArray());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies elapsed-time reporting: everything that has started shows
 * positive elapsed time; a still-running phase/step keeps accruing time
 * across reads of the same view; a completed phase and its steps are
 * frozen; an untouched phase reports zero.
 */
@Test(timeout=10000) public void testElapsedTime() throws Exception {
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
  Thread.sleep(50L); // guarantee a measurable duration
  startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
  Step loadingFsImageDelegationKeys = new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
  Thread.sleep(50L);
  startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // LOADING_EDITS is deliberately left running.
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile = new Step("file", 1000L);
  startupProgress.beginStep(LOADING_EDITS, loadingEditsFile);
  startupProgress.setTotal(LOADING_EDITS, loadingEditsFile, 10000L);
  incrementCounter(startupProgress, LOADING_EDITS, loadingEditsFile, 5000L);
  Thread.sleep(50L);
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  // Everything that has started shows positive elapsed time.
  assertTrue(view.getElapsedTime() > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageInodes) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageDelegationKeys) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS, loadingEditsFile) > 0);
  // A phase that never started reports zero.
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT) == 0);
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT, new Step(INODES)) == 0);
  long totalTime = view.getElapsedTime();
  long loadingFsImageTime = view.getElapsedTime(LOADING_FSIMAGE);
  long loadingFsImageInodesTime =
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageInodes);
  // BUG FIX: this previously queried loadingFsImageInodes (copy-paste), so
  // the delegation-keys step time was never captured or checked.
  long loadingFsImageDelegationKeysTime =
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
  long loadingEditsTime = view.getElapsedTime(LOADING_EDITS);
  long loadingEditsFileTime = view.getElapsedTime(LOADING_EDITS, loadingEditsFile);
  Thread.sleep(50L);
  // Total time and the still-running LOADING_EDITS keep accruing...
  assertTrue(totalTime < view.getElapsedTime());
  assertTrue(loadingEditsTime < view.getElapsedTime(LOADING_EDITS));
  assertTrue(loadingEditsFileTime < view.getElapsedTime(LOADING_EDITS, loadingEditsFile));
  // ...while the completed LOADING_FSIMAGE phase and its steps are frozen.
  assertEquals(loadingFsImageTime, view.getElapsedTime(LOADING_FSIMAGE));
  assertEquals(loadingFsImageInodesTime,
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageInodes));
  assertEquals(loadingFsImageDelegationKeysTime,
      view.getElapsedTime(LOADING_FSIMAGE, loadingFsImageDelegationKeys));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies percentComplete mid-run and after completion. The exact mid-run
// fractions (0.167, 0.10, 0.25, 0.5) pin the library's internal weighting
// of step counts/totals as a regression check.
@Test(timeout=10000) public void testPercentComplete(){
startupProgress.beginPhase(LOADING_FSIMAGE);
// LOADING_FSIMAGE: inodes at 100/1000 (10%), delegation keys at 200/800 (25%).
Step loadingFsImageInodes=new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes);
startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageInodes,1000L);
incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageInodes,100L);
Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS);
startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageDelegationKeys,800L);
incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageDelegationKeys,200L);
// LOADING_EDITS begins while LOADING_FSIMAGE is still open: file at 5000/10000.
startupProgress.beginPhase(LOADING_EDITS);
Step loadingEditsFile=new Step("file",1000L);
startupProgress.beginStep(LOADING_EDITS,loadingEditsFile);
startupProgress.setTotal(LOADING_EDITS,loadingEditsFile,10000L);
incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,5000L);
StartupProgressView view=startupProgress.createView();
assertNotNull(view);
assertEquals(0.167f,view.getPercentComplete(),0.001f);
assertEquals(0.167f,view.getPercentComplete(LOADING_FSIMAGE),0.001f);
assertEquals(0.10f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageInodes),0.001f);
assertEquals(0.25f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageDelegationKeys),0.001f);
assertEquals(0.5f,view.getPercentComplete(LOADING_EDITS),0.001f);
assertEquals(0.5f,view.getPercentComplete(LOADING_EDITS,loadingEditsFile),0.001f);
// Phases/steps that never started report 0.0.
assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT),0.001f);
assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT,new Step(INODES)),0.001f);
// Finish both phases; completed work reports 1.0 even though the counters
// never reached the declared totals.
startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes);
startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.endStep(LOADING_EDITS,loadingEditsFile);
startupProgress.endPhase(LOADING_EDITS);
view=startupProgress.createView();
assertNotNull(view);
// Overall percent reflects the still-pending SAVING_CHECKPOINT phase
// (exact weighting is internal to StartupProgress).
assertEquals(0.5f,view.getPercentComplete(),0.001f);
assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE),0.001f);
assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageInodes),0.001f);
assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageDelegationKeys),0.001f);
assertEquals(1.0f,view.getPercentComplete(LOADING_EDITS),0.001f);
assertEquals(1.0f,view.getPercentComplete(LOADING_EDITS,loadingEditsFile),0.001f);
assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT),0.001f);
assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT,new Step(INODES)),0.001f);
}
UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A view taken before any phase begins reports zeroed metrics and PENDING
 * status for every phase, with no steps recorded anywhere, while still
 * enumerating every phase of the enum in order.
 */
@Test(timeout=10000) public void testInitialState(){
  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  assertEquals(0L, view.getElapsedTime());
  assertEquals(0.0f, view.getPercentComplete(), 0.001f);
  List seenPhases = new ArrayList();
  for ( Phase phase : view.getPhases()) {
    seenPhases.add(phase);
    // Every per-phase metric is at its untouched default.
    assertEquals(0L, view.getElapsedTime(phase));
    assertNull(view.getFile(phase));
    assertEquals(0.0f, view.getPercentComplete(phase), 0.001f);
    assertEquals(Long.MIN_VALUE, view.getSize(phase));
    assertEquals(PENDING, view.getStatus(phase));
    assertEquals(0L, view.getTotal(phase));
    // No steps may exist yet in any phase.
    for ( Step step : view.getSteps(phase)) {
      fail(String.format("unexpected step %s in phase %s at initial state", step, phase));
    }
  }
  // The view must enumerate every phase, in enum order.
  assertArrayEquals(EnumSet.allOf(Phase.class).toArray(), seenPhases.toArray());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Once every phase has completed, StartupProgress freezes: later mutations
// must be ignored, so views taken before and after them agree field by field.
@Test(timeout=10000) public void testFrozenAfterStartupCompletes(){
// Run LOADING_FSIMAGE to completion with known file/size/total/count values.
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.setFile(LOADING_FSIMAGE,"file1");
startupProgress.setSize(LOADING_FSIMAGE,1000L);
Step step=new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE,step);
startupProgress.setTotal(LOADING_FSIMAGE,step,10000L);
incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L);
startupProgress.endStep(LOADING_FSIMAGE,step);
startupProgress.endPhase(LOADING_FSIMAGE);
// Drive every remaining phase to COMPLETE so startup counts as finished.
for ( Phase phase : EnumSet.allOf(Phase.class)) {
if (startupProgress.getStatus(phase) != Status.COMPLETE) {
startupProgress.beginPhase(phase);
startupProgress.endPhase(phase);
}
}
StartupProgressView before=startupProgress.createView();
// All of the following mutations arrive after completion and must be
// silently dropped.
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.setFile(LOADING_FSIMAGE,"file2");
startupProgress.setSize(LOADING_FSIMAGE,2000L);
startupProgress.beginStep(LOADING_FSIMAGE,step);
startupProgress.setTotal(LOADING_FSIMAGE,step,20000L);
incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L);
startupProgress.endStep(LOADING_FSIMAGE,step);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.beginPhase(LOADING_EDITS);
Step newStep=new Step("file1");
startupProgress.beginStep(LOADING_EDITS,newStep);
incrementCounter(startupProgress,LOADING_EDITS,newStep,100L);
startupProgress.endStep(LOADING_EDITS,newStep);
startupProgress.endPhase(LOADING_EDITS);
StartupProgressView after=startupProgress.createView();
// The post-mutation view must match the pre-mutation view field by field.
assertEquals(before.getCount(LOADING_FSIMAGE),after.getCount(LOADING_FSIMAGE));
assertEquals(before.getCount(LOADING_FSIMAGE,step),after.getCount(LOADING_FSIMAGE,step));
assertEquals(before.getElapsedTime(),after.getElapsedTime());
assertEquals(before.getElapsedTime(LOADING_FSIMAGE),after.getElapsedTime(LOADING_FSIMAGE));
assertEquals(before.getElapsedTime(LOADING_FSIMAGE,step),after.getElapsedTime(LOADING_FSIMAGE,step));
assertEquals(before.getFile(LOADING_FSIMAGE),after.getFile(LOADING_FSIMAGE));
assertEquals(before.getSize(LOADING_FSIMAGE),after.getSize(LOADING_FSIMAGE));
assertEquals(before.getTotal(LOADING_FSIMAGE),after.getTotal(LOADING_FSIMAGE));
assertEquals(before.getTotal(LOADING_FSIMAGE,step),after.getTotal(LOADING_FSIMAGE,step));
// The step begun after completion must not have been recorded at all.
assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * setTotal values recorded per step must be reported verbatim by a
 * subsequently created view.
 */
@Test(timeout=10000) public void testTotal(){
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step inodesStep = new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE, inodesStep);
  startupProgress.setTotal(LOADING_FSIMAGE, inodesStep, 1000L);
  startupProgress.endStep(LOADING_FSIMAGE, inodesStep);

  Step delegationKeysStep = new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE, delegationKeysStep);
  startupProgress.setTotal(LOADING_FSIMAGE, delegationKeysStep, 800L);
  startupProgress.endStep(LOADING_FSIMAGE, delegationKeysStep);
  startupProgress.endPhase(LOADING_FSIMAGE);

  startupProgress.beginPhase(LOADING_EDITS);
  Step editsFileStep = new Step("file", 1000L);
  startupProgress.beginStep(LOADING_EDITS, editsFileStep);
  startupProgress.setTotal(LOADING_EDITS, editsFileStep, 10000L);
  startupProgress.endStep(LOADING_EDITS, editsFileStep);
  startupProgress.endPhase(LOADING_EDITS);

  StartupProgressView view = startupProgress.createView();
  assertNotNull(view);
  assertEquals(1000L, view.getTotal(LOADING_FSIMAGE, inodesStep));
  assertEquals(800L, view.getTotal(LOADING_FSIMAGE, delegationKeysStep));
  assertEquals(10000L, view.getTotal(LOADING_EDITS, editsFileStep));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Phase status tracks the lifecycle: an ended phase is COMPLETE, a
 * begun-but-not-ended phase is RUNNING, and an untouched phase is PENDING.
 */
@Test(timeout=10000) public void testStatus(){
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  StartupProgressView progressView = startupProgress.createView();
  assertNotNull(progressView);
  assertEquals(COMPLETE, progressView.getStatus(LOADING_FSIMAGE));
  assertEquals(RUNNING, progressView.getStatus(LOADING_EDITS));
  assertEquals(PENDING, progressView.getStatus(SAVING_CHECKPOINT));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
// Exercises NamenodeWebHdfsMethods.chooseDatanode for write-side (CREATE)
// and read-side (GETFILECHECKSUM/OPEN/APPEND) operations on a 6-datanode,
// 3-rack mini cluster.
@Test public void testDataLocality() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final String[] racks={RACK0,RACK0,RACK1,RACK1,RACK2,RACK2};
final int nDataNodes=racks.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks="+ Arrays.asList(racks));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(nDataNodes).racks(racks).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final NameNode namenode=cluster.getNameNode();
final DatanodeManager dm=namenode.getNamesystem().getBlockManager().getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize=DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f="/foo";
{
// For CREATE of a not-yet-written file, the chosen datanode's IP must
// match each datanode's registered IP. NOTE(review): all MiniDFSCluster
// datanodes share the local host address, so this presumably verifies
// client-local placement -- confirm against chooseDatanode's javadoc.
for (int i=0; i < nDataNodes; i++) {
final DataNode dn=cluster.getDataNodes().get(i);
final String ipAddr=dm.getDatanode(dn.getDatanodeId()).getIpAddr();
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PutOpParam.Op.CREATE,-1L,blocksize,null);
Assert.assertEquals(ipAddr,chosen.getIpAddr());
}
}
// Write a single-replica one-byte file so exactly one datanode holds
// its only block.
final Path p=new Path(f);
final FSDataOutputStream out=dfs.create(p,(short)1);
out.write(1);
out.close();
final LocatedBlocks locatedblocks=NameNodeAdapter.getBlockLocations(namenode,f,0,1);
final List lb=locatedblocks.getLocatedBlocks();
Assert.assertEquals(1,lb.size());
final DatanodeInfo[] locations=lb.get(0).getLocations();
Assert.assertEquals(1,locations.length);
final DatanodeInfo expected=locations[0];
// All three read-side operations must be routed to the datanode that
// holds the replica.
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.GETFILECHECKSUM,-1L,blocksize,null);
Assert.assertEquals(expected,chosen);
}
{
// OPEN at offset 0 targets the file's first (only) block.
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.OPEN,0,blocksize,null);
Assert.assertEquals(expected,chosen);
}
{
final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PostOpParam.Op.APPEND,-1L,blocksize,null);
Assert.assertEquals(expected,chosen);
}
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Verifies that chooseDatanode honors the comma-separated exclude list:
 * for GETFILECHECKSUM, OPEN and APPEND, a datanode named in the exclude
 * string must never be chosen, as the list grows to cover more replicas.
 */
@Test public void testExcludeDataNodes() throws Exception {
  final Configuration conf=WebHdfsTestUtil.createConf();
  final String[] racks={RACK0,RACK0,RACK1,RACK1,RACK2,RACK2};
  final String[] hosts={"DataNode1","DataNode2","DataNode3","DataNode4","DataNode5","DataNode6"};
  final int nDataNodes=hosts.length;
  LOG.info("nDataNodes=" + nDataNodes + ", racks="+ Arrays.asList(racks)+ ", hosts="+ Arrays.asList(hosts));
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs=cluster.getFileSystem();
    final NameNode namenode=cluster.getNameNode();
    final DatanodeManager dm=namenode.getNamesystem().getBlockManager().getDatanodeManager();
    LOG.info("dm=" + dm);
    final long blocksize=DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
    final String f="/foo";
    // Write a tiny triple-replicated file so there are three candidate
    // locations to exclude from.
    final Path p=new Path(f);
    final FSDataOutputStream out=dfs.create(p,(short)3);
    out.write(1);
    out.close();
    final LocatedBlocks locatedblocks=NameNodeAdapter.getBlockLocations(namenode,f,0,1);
    final List lb=locatedblocks.getLocatedBlocks();
    Assert.assertEquals(1,lb.size());
    final DatanodeInfo[] locations=lb.get(0).getLocations();
    Assert.assertEquals(3,locations.length);
    // Accumulates the comma-separated exclude list. StringBuilder instead
    // of StringBuffer: a method-local needs no synchronization.
    StringBuilder excluded=new StringBuilder();
    for (int i=0; i < 2; i++) {
      // Exclude replicas 0..i, then verify no excluded host is chosen for
      // any of the three datanode-resolving operations.
      excluded.append(locations[i].getXferAddr());
      {
        final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.GETFILECHECKSUM,-1L,blocksize,excluded.toString());
        for (int j=0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
        }
      }
      {
        final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,GetOpParam.Op.OPEN,0,blocksize,excluded.toString());
        for (int j=0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
        }
      }
      {
        final DatanodeInfo chosen=NamenodeWebHdfsMethods.chooseDatanode(namenode,f,PostOpParam.Op.APPEND,-1L,blocksize,excluded.toString());
        for (int j=0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(),chosen.getHostName());
        }
      }
      // Separator for the next address appended in the following iteration.
      excluded.append(",");
    }
  }
  finally {
    cluster.shutdown();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercises size-based eviction in ShortCircuitCache: after inserting three
 * replicas into a small cache and releasing them, re-fetching blocks 1 and 2
 * must hit the cache (the creator must NOT run), while re-fetching block 0
 * must invoke the creator again because it was evicted.
 */
@Test(timeout=60000) public void testEviction() throws Exception {
  // Small capacity with very long expiries, so only size-based eviction can
  // occur during the test. NOTE(review): ctor arg meanings assumed from the
  // assertions below -- confirm against the ShortCircuitCache constructor.
  final ShortCircuitCache cache=new ShortCircuitCache(2,10000000,1,10000000,1,10000,0);
  final TestFileDescriptorPair pairs[]=new TestFileDescriptorPair[]{new TestFileDescriptorPair(),new TestFileDescriptorPair(),new TestFileDescriptorPair()};
  ShortCircuitReplicaInfo replicaInfos[]=new ShortCircuitReplicaInfo[]{null,null,null};
  // Insert three replicas; each fetch must create and return a valid one.
  for (int i=0; i < pairs.length; i++) {
    replicaInfos[i]=cache.fetchOrCreate(new ExtendedBlockId(i,"test_bp1"),new SimpleReplicaCreator(i,cache,pairs[i]));
    Preconditions.checkNotNull(replicaInfos[i].getReplica());
    Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
    pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),replicaInfos[i].getReplica().getMetaStream());
  }
  // Release all references so the entries become eligible for eviction.
  for (int i=0; i < pairs.length; i++) {
    replicaInfos[i].getReplica().unref();
  }
  // Blocks 1 and 2 must still be cached: the creator failing proves a miss.
  for (int i=1; i < pairs.length; i++) {
    // Effectively-final copy for the anonymous class (replaces the
    // deprecated "new Integer(i)" boxing).
    final int iVal=i;
    replicaInfos[i]=cache.fetchOrCreate(new ExtendedBlockId(i,"test_bp1"),new ShortCircuitReplicaCreator(){
      @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
        Assert.fail("expected to use existing entry for " + iVal);
        return null;
      }
    }
    );
    Preconditions.checkNotNull(replicaInfos[i].getReplica());
    Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
    pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),replicaInfos[i].getReplica().getMetaStream());
  }
  // Block 0 was evicted, so this fetch must invoke the creator.
  final MutableBoolean calledCreate=new MutableBoolean(false);
  replicaInfos[0]=cache.fetchOrCreate(new ExtendedBlockId(0,"test_bp1"),new ShortCircuitReplicaCreator(){
    @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){
      calledCreate.setValue(true);
      return null;
    }
  }
  );
  Preconditions.checkState(replicaInfos[0].getReplica() == null);
  Assert.assertTrue(calledCreate.isTrue());
  // Release the re-fetched replicas and clean everything up.
  for (int i=1; i < pairs.length; i++) {
    replicaInfos[i].getReplica().unref();
  }
  for (int i=0; i < pairs.length; i++) {
    pairs[i].close();
  }
  cache.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * A cached short-circuit replica's shared-memory slot must be valid while
 * its datanode is alive, and invalid after the datanode shuts down.
 */
@Test(timeout=60000) public void testShmBasedStaleness() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
  Configuration conf=createShortCircuitConf("testShmBasedStaleness",sockDir);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs=cluster.getFileSystem();
  final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
  String TEST_FILE="/test_file";
  final int TEST_FILE_LEN=8193;
  final int SEED=0xFADED;
  DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED);
  // Read one byte so the client caches a short-circuit replica for the
  // file's first block.
  FSDataInputStream fis=fs.open(new Path(TEST_FILE));
  int first=fis.read();
  final ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,new Path(TEST_FILE));
  Assert.assertTrue(first != -1);
  // While the datanode is alive, the cached replica's slot is valid.
  cache.accept(new CacheVisitor(){
    @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
      ShortCircuitReplica replica=replicas.get(ExtendedBlockId.fromExtendedBlock(block));
      Assert.assertNotNull(replica);
      Assert.assertTrue(replica.getSlot().isValid());
    }
  }
  );
  // Killing the datanode must invalidate the slot.
  cluster.getDataNodes().get(0).shutdown();
  cache.accept(new CacheVisitor(){
    @Override public void visit( int numOutstandingMmaps, Map replicas, Map failedLoads, Map evictable, Map evictableMmapped){
      ShortCircuitReplica replica=replicas.get(ExtendedBlockId.fromExtendedBlock(block));
      Assert.assertNotNull(replica);
      Assert.assertFalse(replica.getSlot().isValid());
    }
  }
  );
  // Fix: release the input stream and the temporary socket directory (both
  // were previously leaked; the sibling short-circuit test closes its
  // sockDir).
  fis.close();
  cluster.shutdown();
  sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test unlinking a file whose blocks we are caching in the DFSClient.
 * The DataNode will notify the DFSClient that the replica is stale via the
 * ShortCircuitShm.
 */
@Test(timeout=60000) public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testUnlinkingReplicasInFileDescriptorCache",sockDir);
// Make the FD cache expiry effectively infinite so cached entries can only
// leave via staleness notification, which is what this test exercises.
conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,1000000000L);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
// Before any reads there are no shared-memory segments at all.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(0,info.size());
}
}
);
final Path TEST_PATH=new Path("/test_file");
final int TEST_FILE_LEN=8193;
final int SEED=0xFADE0;
// Create and fully read a single-replica file, verifying its contents.
DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LEN,(short)1,SEED);
byte contents[]=DFSTestUtil.readFileBuffer(fs,TEST_PATH);
byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN);
Assert.assertTrue(Arrays.equals(contents,expected));
// The read should have left exactly one not-full, connected shm segment
// for the single datanode.
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertTrue(info.get(datanode).full.isEmpty());
Assert.assertFalse(info.get(datanode).disabled);
Assert.assertEquals(1,info.get(datanode).notFull.values().size());
DfsClientShm shm=info.get(datanode).notFull.values().iterator().next();
Assert.assertFalse(shm.isDisconnected());
}
}
);
// Delete the file, then poll until every slot in the segment has been
// invalidated (the staleness notification arrives asynchronously).
fs.delete(TEST_PATH,false);
GenericTestUtils.waitFor(new Supplier(){
MutableBoolean done=new MutableBoolean(true);
@Override public Boolean get(){
try {
done.setValue(true);
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertTrue(info.get(datanode).full.isEmpty());
Assert.assertFalse(info.get(datanode).disabled);
Assert.assertEquals(1,info.get(datanode).notFull.values().size());
DfsClientShm shm=info.get(datanode).notFull.values().iterator().next();
// Not done while any slot in the segment is still valid.
for (Iterator iter=shm.slotIterator(); iter.hasNext(); ) {
Slot slot=iter.next();
if (slot.isValid()) {
done.setValue(false);
}
}
}
}
);
}
catch ( IOException e) {
LOG.error("error running visitor",e);
}
return done.booleanValue();
}
}
,10,60000);
cluster.shutdown();
sockDir.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Allocates a shared-memory slot directly from the cache, verifies the shm
 * manager's bookkeeping, then schedules the slot's release and waits for the
 * segment to disappear.
 */
@Test(timeout=60000) public void testAllocShm() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir=new TemporarySocketDirectory();
Configuration conf=createShortCircuitConf("testAllocShm",sockDir);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs=cluster.getFileSystem();
final ShortCircuitCache cache=fs.getClient().getClientContext().getShortCircuitCache();
// No segments should exist before the first allocation.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(0,info.size());
}
}
);
DomainPeer peer=getDomainPeerToDn(conf);
MutableBoolean usedPeer=new MutableBoolean(false);
ExtendedBlockId blockId=new ExtendedBlockId(123,"xyz");
final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
// The first allocation has to consume the peer to set up the segment.
Slot slot=cache.allocShmSlot(datanode,peer,usedPeer,blockId,"testAllocShm_client");
Assert.assertNotNull(slot);
Assert.assertTrue(usedPeer.booleanValue());
// Exactly one enabled, not-full segment should now be tracked for this DN.
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
Assert.assertEquals(1,info.size());
PerDatanodeVisitorInfo vinfo=info.get(datanode);
Assert.assertFalse(vinfo.disabled);
Assert.assertEquals(0,vinfo.full.size());
Assert.assertEquals(1,vinfo.notFull.size());
}
}
);
cache.scheduleSlotReleaser(slot);
// Releasing the only slot should eventually remove the segment entirely.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
final MutableBoolean done=new MutableBoolean(false);
try {
cache.getDfsClientShmManager().visit(new Visitor(){
@Override public void visit( HashMap info) throws IOException {
done.setValue(info.get(datanode).full.isEmpty() && info.get(datanode).notFull.isEmpty());
}
}
);
}
catch ( IOException e) {
LOG.error("error running visitor",e);
}
return done.booleanValue();
}
}
,10,60000);
cluster.shutdown();
sockDir.close();
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises seek/skip on a short-circuit local read stream with checksum
 * verification enabled (skip-checksum=false).
 * NOTE(review): the bytes returned by read() are never compared to fileData
 * here — presumably the test only checks that seeking with checksums on does
 * not throw; confirm against the original intent.
 */
@Test(timeout=10000) public void testSkipWithVerifyChecksum() throws IOException {
int size=blockSize;
Configuration conf=new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,true);
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,false);
conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,"/tmp/testSkipWithVerifyChecksum._PORT");
DomainSocket.disableBindPathValidation();
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
FileSystem fs=cluster.getFileSystem();
try {
Path path=new Path("/");
assertTrue("/ should be a directory",fs.getFileStatus(path).isDirectory() == true);
// Write three blocks' worth of random data.
byte[] fileData=AppendTestUtil.randomBytes(seed,size * 3);
Path file1=new Path("filelocal.dat");
FSDataOutputStream stm=createFile(fs,file1,1);
stm.write(fileData);
stm.close();
FSDataInputStream instm=fs.open(file1);
byte[] actual=new byte[fileData.length];
// Read a few bytes, then seek into the last block and read again there.
int nread=instm.read(actual,0,3);
long skipped=2 * size + 3;
instm.seek(skipped);
nread=instm.read(actual,(int)(skipped + nread),3);
instm.close();
}
finally {
fs.close();
cluster.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Fills a 4096-byte shared-memory segment with slots, then checks slot
 * iteration, anchoring semantics (anchoring allowed only after
 * makeAnchorable), and final cleanup.
 */
@Test(timeout=60000) public void testAllocateSlots() throws Exception {
File path=new File(TEST_BASE,"testAllocateSlots");
path.mkdirs();
SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("shm_",new String[]{path.getAbsolutePath()});
FileInputStream stream=factory.createDescriptor("testAllocateSlots",4096);
ShortCircuitShm shm=new ShortCircuitShm(ShmId.createRandom(),stream);
int numSlots=0;
ArrayList slots=new ArrayList();
// Allocate until the segment reports itself full.
while (!shm.isFull()) {
Slot slot=shm.allocAndRegisterSlot(new ExtendedBlockId(123L,"test_bp1"));
slots.add(slot);
numSlots++;
}
LOG.info("allocated " + numSlots + " slots before running out.");
int slotIdx=0;
// The iterator must yield only slots we allocated.
for (Iterator iter=shm.slotIterator(); iter.hasNext(); ) {
Assert.assertTrue(slots.contains(iter.next()));
}
// Slots must refuse anchors before makeAnchorable(); also verifies slot
// indices were assigned sequentially in allocation order.
for ( Slot slot : slots) {
Assert.assertFalse(slot.addAnchor());
Assert.assertEquals(slotIdx++,slot.getSlotIdx());
}
for ( Slot slot : slots) {
slot.makeAnchorable();
}
// After makeAnchorable(), anchoring succeeds.
for ( Slot slot : slots) {
Assert.assertTrue(slot.addAnchor());
}
for ( Slot slot : slots) {
slot.removeAnchor();
}
// Unregister and invalidate everything before freeing the segment.
for ( Slot slot : slots) {
shm.unregisterSlot(slot.getSlotIdx());
slot.makeInvalid();
}
shm.free();
stream.close();
FileUtil.fullyDelete(path);
}
InternalCallVerifier EqualityVerifier
/**
 * Checks that -saveNamespace succeeds on both HA namenodes once safe mode
 * has been entered cluster-wide.
 */
@Test(timeout=30000) public void testSaveNamespace() throws Exception {
  setUpHaCluster(false);
  int rc = admin.run(new String[]{"-safemode", "enter"});
  assertEquals(err.toString().trim(), 0, rc);
  String expected = "Safe mode is ON in.*";
  // One line of output per namenode, hence the doubled pattern.
  assertOutputMatches(expected + newLine + expected + newLine);
  rc = admin.run(new String[]{"-saveNamespace"});
  assertEquals(err.toString().trim(), 0, rc);
  expected = "Save namespace successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/** Verifies that -refreshNodes succeeds against both HA namenodes. */
@Test(timeout=30000) public void testRefreshNodes() throws Exception {
  setUpHaCluster(false);
  final int rc = admin.run(new String[]{"-refreshNodes"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Refresh nodes successful for.*";
  // One success line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/** Verifies that -metasave writes a metasave file on both HA namenodes. */
@Test(timeout=30000) public void testMetaSave() throws Exception {
  setUpHaCluster(false);
  final int rc = admin.run(new String[]{"-metasave", "dfs.meta"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Created metasave file dfs.meta in the log directory" + " of namenode.*";
  // One success line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/** Verifies -refreshServiceAcl on an HA cluster with security ACLs enabled. */
@Test(timeout=30000) public void testRefreshServiceAcl() throws Exception {
  setUpHaCluster(true);
  final int rc = admin.run(new String[]{"-refreshServiceAcl"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Refresh service acl successful for.*";
  // One success line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/** Verifies that -refreshCallQueue succeeds against both HA namenodes. */
@Test(timeout=30000) public void testRefreshCallQueue() throws Exception {
  setUpHaCluster(false);
  final int rc = admin.run(new String[]{"-refreshCallQueue"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Refresh call queue successful for.*";
  // One success line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/** Verifies -refreshUserToGroupsMappings against both HA namenodes. */
@Test(timeout=30000) public void testRefreshUserToGroupsMappings() throws Exception {
  setUpHaCluster(false);
  final int rc = admin.run(new String[]{"-refreshUserToGroupsMappings"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Refresh user to groups mapping successful for.*";
  // One success line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/**
 * Drives -restoreFailedStorage through check/true/false and verifies the
 * value reported by both HA namenodes at each step.
 */
@Test(timeout=30000) public void testRestoreFailedStorage() throws Exception {
  setUpHaCluster(false);
  // {argument, expected per-namenode output pattern}
  final String[][] cases = {
    {"check", "restoreFailedStorage is set to false for.*"},
    {"true", "restoreFailedStorage is set to true for.*"},
    {"false", "restoreFailedStorage is set to false for.*"},
  };
  for (String[] c : cases) {
    final int rc = admin.run(new String[]{"-restoreFailedStorage", c[0]});
    assertEquals(err.toString().trim(), 0, rc);
    // One line of output per namenode.
    assertOutputMatches(c[1] + newLine + c[1] + newLine);
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Drives safe mode through enter/get/leave/get and checks the state both
 * namenodes report after each step.
 */
@Test(timeout=30000) public void testSetSafeMode() throws Exception {
  setUpHaCluster(false);
  // {safemode subcommand, expected per-namenode output pattern}
  final String[][] steps = {
    {"enter", "Safe mode is ON in.*"},
    {"get", "Safe mode is ON in.*"},
    {"leave", "Safe mode is OFF in.*"},
    {"get", "Safe mode is OFF in.*"},
  };
  for (String[] step : steps) {
    final int rc = admin.run(new String[]{"-safemode", step[0]});
    assertEquals(err.toString().trim(), 0, rc);
    // Each namenode reports once, hence the doubled pattern.
    assertOutputMatches(step[1] + newLine + step[1] + newLine);
  }
}
InternalCallVerifier EqualityVerifier
/** Verifies -refreshSuperUserGroupsConfiguration against both HA namenodes. */
@Test(timeout=30000) public void testRefreshSuperUserGroupsConfiguration() throws Exception {
  setUpHaCluster(false);
  final int rc = admin.run(new String[]{"-refreshSuperUserGroupsConfiguration"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Refresh super user groups configuration successful for.*";
  // One success line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/** Verifies -setBalancerBandwidth applies the new value on both namenodes. */
@Test(timeout=30000) public void testSetBalancerBandwidth() throws Exception {
  setUpHaCluster(false);
  final int rc = admin.run(new String[]{"-setBalancerBandwidth", "10"});
  assertEquals(err.toString().trim(), 0, rc);
  final String expected = "Balancer bandwidth is set to 10 for.*";
  // One confirmation line per namenode.
  assertOutputMatches(expected + newLine + expected + newLine);
}
InternalCallVerifier EqualityVerifier
/**
 * -transitionToActive against a ready standby must succeed and be tagged
 * as a user-initiated request.
 */
@Test public void testTransitionToActive() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  final int rc = runTool("-transitionToActive", "nn1");
  assertEquals(0, rc);
  // The request info passed over RPC must carry the user-request source.
  Mockito.verify(mockProtocol).transitionToActive(reqInfoCaptor.capture());
  assertEquals(RequestSource.REQUEST_BY_USER, reqInfoCaptor.getValue().getSource());
}
InternalCallVerifier BooleanVerifier
/**
* Test case to check whether both the name node is active or not
* @throws Exception
*/
@Test public void testTransitionToActiveWhenOtherNamenodeisActive() throws Exception {
NameNode nn1=cluster.getNameNode(0);
NameNode nn2=cluster.getNameNode(1);
if (nn1.getState() != null && !nn1.getState().equals(HAServiceState.STANDBY.name())) {
cluster.transitionToStandby(0);
}
if (nn2.getState() != null && !nn2.getState().equals(HAServiceState.STANDBY.name())) {
cluster.transitionToStandby(1);
}
assertTrue(nn1.isStandbyState());
assertTrue(nn2.isStandbyState());
runTool("-transitionToActive","nn1");
runTool("-transitionToActive","nn2");
assertFalse("Both namenodes cannot be active",nn1.isActiveState() && nn2.isActiveState());
if (nn1.getState() != null && !nn1.getState().equals(HAServiceState.STANDBY.name())) {
cluster.transitionToStandby(0);
}
if (nn2.getState() != null && !nn2.getState().equals(HAServiceState.STANDBY.name())) {
cluster.transitionToStandby(1);
}
assertTrue(nn1.isStandbyState());
assertTrue(nn2.isStandbyState());
runTool("-transitionToActive","nn1");
runTool("-transitionToActive","nn2","--forceactive");
assertFalse("Both namenodes cannot be active even though with forceActive",nn1.isActiveState() && nn2.isActiveState());
cluster.shutdownNameNode(0);
if (nn2.getState() != null && !nn2.getState().equals(HAServiceState.STANDBY.name())) {
cluster.transitionToStandby(1);
}
assertTrue(nn2.isStandbyState());
assertFalse(cluster.isNameNodeUp(0));
runTool("-transitionToActive","nn2","--forceactive");
assertTrue("Namenode nn2 should be active",nn2.isActiveState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Round-trips each namenode through active and back to standby via the admin tool. */
@Test public void testStateTransition() throws Exception {
  NameNode firstNn = cluster.getNameNode(0);
  assertTrue(firstNn.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn1"));
  assertFalse(firstNn.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn1"));
  assertTrue(firstNn.isStandbyState());
  // Same cycle for the second namenode.
  NameNode secondNn = cluster.getNameNode(1);
  assertTrue(secondNn.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn2"));
  assertFalse(secondNn.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn2"));
  assertTrue(secondNn.isStandbyState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Call fetch token using http server
*/
@Test public void expectedTokenIsRetrievedFromHttp() throws Exception {
final Token testToken=new Token("id".getBytes(),"pwd".getBytes(),FakeRenewer.KIND,new Text("127.0.0.1:1234"));
WebHdfsFileSystem fs=mock(WebHdfsFileSystem.class);
doReturn(testToken).when(fs).getDelegationToken(anyString());
Path p=new Path(f.getRoot().getAbsolutePath(),tokenFile);
DelegationTokenFetcher.saveDelegationToken(conf,fs,null,p);
Credentials creds=Credentials.readTokenStorageFile(p,conf);
Iterator> itr=creds.getAllTokens().iterator();
assertTrue("token not exist error",itr.hasNext());
Token> fetchedToken=itr.next();
Assert.assertArrayEquals("token wrong identifier error",testToken.getIdentifier(),fetchedToken.getIdentifier());
Assert.assertArrayEquals("token wrong password error",testToken.getPassword(),fetchedToken.getPassword());
DelegationTokenFetcher.renewTokens(conf,p);
Assert.assertEquals(testToken,FakeRenewer.getLastRenewed());
DelegationTokenFetcher.cancelTokens(conf,p);
Assert.assertEquals(testToken,FakeRenewer.getLastCanceled());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** getconf -excludeFile should print the configured exclude file path. */
@Test public void TestGetConfExcludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  localFileSys = FileSystem.getLocal(conf);
  Path base = new Path(localFileSys.getWorkingDirectory(),
      System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hosts = new Path(base, "hosts");
  Path excludes = new Path(base, "exclude");
  // Point the config at empty include/exclude files.
  conf.set(DFSConfigKeys.DFS_HOSTS, hosts.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludes.toUri().getPath());
  writeConfigFile(hosts, null);
  writeConfigFile(excludes, null);
  String output = runTool(conf, new String[]{"-excludeFile"}, true);
  assertEquals(excludes.toUri().getPath(), output.trim());
  cleanupFile(localFileSys, excludes.getParent());
}
InternalCallVerifier BooleanVerifier
/** An unexpected trailing argument to getconf must produce an error message. */
@Test(timeout=10000) public void testExtraArgsThrowsError() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set("mykey", "myval");
  final String output = runTool(conf, new String[]{"-namenodes", "unexpected-arg"}, false);
  assertTrue(output.contains("Did not expect argument: unexpected-arg"));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Tests that getconf behaves sanely against a completely empty configuration:
 * address lookups fail, but every key-backed command still runs without
 * crashing.
 */
@Test(timeout=10000) public void testEmptyConf() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration(false);
// With an empty config, address lookups are expected to fail (last arg=false).
getAddressListFromTool(TestType.NAMENODE,conf,false);
System.out.println(getAddressListFromTool(TestType.BACKUP,conf,false));
getAddressListFromTool(TestType.SECONDARY,conf,false);
getAddressListFromTool(TestType.NNRPCADDRESSES,conf,false);
for ( Command cmd : Command.values()) {
String arg=cmd.getName();
CommandHandler handler=Command.getHandler(arg);
assertNotNull("missing handler: " + cmd,handler);
if (handler.key != null) {
// Key-backed commands must run (no exception) even without a value set.
String[] args={handler.key};
runTool(conf,args,false);
}
}
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},{@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
 */
@Test(timeout=10000) public void testTool() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration(false);
  for (Command cmd : Command.values()) {
    CommandHandler handler = Command.getHandler(cmd.getName());
    // Only key-backed commands (other than -confKey) echo a configured value.
    if (handler.key == null || "-confKey".equals(cmd.getName())) {
      continue;
    }
    conf.set(handler.key, "value");
    String output = runTool(conf, new String[]{cmd.getName()}, true);
    assertTrue(output.contains("value"));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** -confKey should print the trimmed value of the requested key plus a newline. */
@Test(timeout=10000) public void testGetSpecificKey() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Value deliberately padded; the tool should emit it trimmed.
  conf.set("mykey", " myval ");
  final String output = runTool(conf, new String[]{"-confKey", "mykey"}, true);
  assertEquals(String.format("myval%n"), output);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** getconf -includeFile should print the configured hosts (include) file path. */
@Test public void TestGetConfIncludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  localFileSys = FileSystem.getLocal(conf);
  Path base = new Path(localFileSys.getWorkingDirectory(),
      System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hosts = new Path(base, "hosts");
  Path excludes = new Path(base, "exclude");
  // Point the config at empty include/exclude files.
  conf.set(DFSConfigKeys.DFS_HOSTS, hosts.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludes.toUri().getPath());
  writeConfigFile(hosts, null);
  writeConfigFile(excludes, null);
  String output = runTool(conf, new String[]{"-includeFile"}, true);
  assertEquals(hosts.toUri().getPath(), output.trim());
  cleanupFile(localFileSys, excludes.getParent());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests OEV recovery mode: an edits file truncated mid-record must fail a
 * normal XML parse (-1) but parse successfully with recovery enabled, and the
 * recovered output must survive an XML -&gt; binary -&gt; XML round trip.
 */
@Test public void testRecoveryMode() throws IOException {
  String edits = nnHelper.generateEdits();
  // try-with-resources so the stream is closed even if an assertion or
  // runOev call throws (the original leaked it on failure).
  try (FileOutputStream os = new FileOutputStream(edits, true)) {
    // Chop the tail off the edits file so the last record is corrupt.
    FileChannel editsFile = os.getChannel();
    editsFile.truncate(editsFile.size() - 5);
    String editsParsedXml = folder.newFile("editsRecoveredParsed.xml").getAbsolutePath();
    String editsReparsed = folder.newFile("editsRecoveredReparsed").getAbsolutePath();
    String editsParsedXml2 = folder.newFile("editsRecoveredParsed2.xml").getAbsolutePath();
    // Without recovery the truncated file cannot be parsed.
    assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
    // With recovery it parses, and the result round-trips cleanly.
    assertEquals(0, runOev(edits, editsParsedXml, "xml", true));
    assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
    assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
    assertTrue("Test round trip", filesEqualIgnoreTrailingZeros(editsParsedXml, editsParsedXml2));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the OfflineEditsViewer
 */
@Test public void testGenerated() throws IOException {
  final String edits = nnHelper.generateEdits();
  LOG.info("Generated edits=" + edits);
  final String parsedXml = folder.newFile("editsParsed.xml").getAbsolutePath();
  final String reparsedBin = folder.newFile("editsParsed").getAbsolutePath();
  // binary -> XML -> binary conversions must both succeed.
  assertEquals(0, runOev(edits, parsedXml, "xml", false));
  assertEquals(0, runOev(parsedXml, reparsedBin, "binary", false));
  // The generated edits must cover every op code the viewer knows about.
  assertTrue("Edits " + edits + " should have all op codes", hasAllOpCodes(edits));
  LOG.info("Comparing generated file " + reparsedBin + " with reference file " + edits);
  assertTrue("Generated edits and reparsed (bin to XML to bin) should be same", filesEqualIgnoreTrailingZeros(edits, reparsedBin));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Starts a WebImageViewer over the reference fsimage and checks that the
 * WebHDFS-compatible endpoints (LISTSTATUS/GETFILESTATUS) serve the image
 * contents and reject bad paths, bad ops, and bad HTTP methods.
 */
@Test public void testWebImageViewer() throws IOException, InterruptedException, URISyntaxException {
WebImageViewer viewer=new WebImageViewer(NetUtils.createSocketAddr("localhost:0"));
try {
viewer.initServer(originalFsimage.getAbsolutePath());
int port=viewer.getPort();
// Talk to the viewer through an ordinary WebHdfsFileSystem client.
URI uri=new URI("webhdfs://localhost:" + String.valueOf(port));
Configuration conf=new Configuration();
WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf);
// NOTE(review): +2 presumably accounts for non-dirN entries written by the
// fixture (e.g. /emptydir) - confirm against the image setup.
FileStatus[] statuses=webhdfs.listStatus(new Path("/"));
assertEquals(NUM_DIRS + 2,statuses.length);
statuses=webhdfs.listStatus(new Path("/dir0"));
assertEquals(FILES_PER_DIR,statuses.length);
// Listing a file yields that file's status, which must match what was written.
FileStatus status=webhdfs.listStatus(new Path("/dir0/file0"))[0];
FileStatus expected=writtenFiles.get("/dir0/file0");
compareFile(expected,status);
statuses=webhdfs.listStatus(new Path("/emptydir"));
assertEquals(0,statuses.length);
// Invalid paths must 404 for LISTSTATUS...
URL url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=LISTSTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url);
url=new URL("http://localhost:" + port + "/webhdfs/v1?op=LISTSTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url);
status=webhdfs.getFileStatus(new Path("/dir0/file0"));
compareFile(expected,status);
// ...and for GETFILESTATUS; unknown ops must 400.
url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=GETFILESTATUS");
verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url);
url=new URL("http://localhost:" + port + "/webhdfs/v1/?op=INVALID");
verifyHttpResponseCode(HttpURLConnection.HTTP_BAD_REQUEST,url);
// POST is not a supported method for the read-only viewer.
url=new URL("http://localhost:" + port + "/webhdfs/v1/?op=LISTSTATUS");
HttpURLConnection connection=(HttpURLConnection)url.openConnection();
connection.setRequestMethod("POST");
connection.connect();
assertEquals(HttpURLConnection.HTTP_BAD_METHOD,connection.getResponseCode());
}
finally {
viewer.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs FileDistributionCalculator over the reference fsimage and checks the
 * reported totals (files, directories, max file size) against what the test
 * wrote into the namespace.
 */
@Test public void testFileDistributionCalculator() throws IOException {
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  new FileDistributionCalculator(new Configuration(), 0, 0, o).visit(new RandomAccessFile(originalFsimage, "r"));
  o.close();
  Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
  Matcher matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalFiles = Integer.parseInt(matcher.group(1));
  assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
  p = Pattern.compile("totalDirectories = (\\d+)\n");
  matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalDirs = Integer.parseInt(matcher.group(1));
  // NOTE(review): +3 presumably counts /, /emptydir and one more fixture
  // directory - confirm against the image setup.
  assertEquals(NUM_DIRS + 3, totalDirs);
  // Was a raw Comparator with a typed compare() override (does not compile);
  // parameterize it and use Long.compare instead of a hand-rolled ternary.
  FileStatus maxFile = Collections.max(writtenFiles.values(), new Comparator<FileStatus>() {
    @Override public int compare(FileStatus first, FileStatus second) {
      return Long.compare(first.getLen(), second.getLen());
    }
  });
  p = Pattern.compile("maxFileSize = (\\d+)\n");
  matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Starts a WebImageViewer over the reference fsimage and verifies
 * GETACLSTATUS responses against the ACLs recorded when the image was
 * written, plus a 404 for a nonexistent path.
 */
@Test public void testWebImageViewerForAcl() throws IOException, InterruptedException, URISyntaxException {
WebImageViewer viewer=new WebImageViewer(NetUtils.createSocketAddr("localhost:0"));
try {
viewer.initServer(originalFsimage.getAbsolutePath());
int port=viewer.getPort();
// Query the viewer through a regular WebHdfsFileSystem client.
URI uri=new URI("webhdfs://localhost:" + String.valueOf(port));
Configuration conf=new Configuration();
WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf);
// Each fixture path's ACL must round-trip through the image unchanged.
AclStatus acl=webhdfs.getAclStatus(new Path("/dirWithNoAcl"));
assertEquals(writtenAcls.get("/dirWithNoAcl"),acl);
acl=webhdfs.getAclStatus(new Path("/dirWithDefaultAcl"));
assertEquals(writtenAcls.get("/dirWithDefaultAcl"),acl);
acl=webhdfs.getAclStatus(new Path("/noAcl"));
assertEquals(writtenAcls.get("/noAcl"),acl);
acl=webhdfs.getAclStatus(new Path("/withAcl"));
assertEquals(writtenAcls.get("/withAcl"),acl);
acl=webhdfs.getAclStatus(new Path("/withSeveralAcls"));
assertEquals(writtenAcls.get("/withSeveralAcls"),acl);
// GETACLSTATUS on a missing path must return 404.
URL url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=GETACLSTATUS");
HttpURLConnection connection=(HttpURLConnection)url.openConnection();
connection.setRequestMethod("GET");
connection.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,connection.getResponseCode());
}
finally {
viewer.shutdown();
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises BestEffortLongFile persistence: the initial default is visible,
 * and every value written through one instance is visible to a fresh reader.
 */
@Test public void testGetSet() throws IOException {
  final BestEffortLongFile longFile = new BestEffortLongFile(FILE, 12345L);
  try {
    // Before any set(), the supplied default is returned and the file exists.
    assertEquals(12345L, longFile.get());
    assertTrue(FILE.exists());
    Random rng = new Random();
    for (int round = 0; round < 100; round++) {
      final long value = rng.nextLong();
      longFile.set(value);
      assertEquals(value, longFile.get());
      // A fresh instance must read the persisted value, not its own default.
      BestEffortLongFile reader = new BestEffortLongFile(FILE, 999L);
      try {
        assertEquals(value, reader.get());
      } finally {
        IOUtils.closeStream(reader);
      }
    }
  } finally {
    IOUtils.closeStream(longFile);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Sanity-checks ChunkedArrayList growth: emptiness, size, chunk count and max chunk size. */
@Test public void testBasics() {
  final int count = 100000;
  ChunkedArrayList chunked = new ChunkedArrayList();
  assertTrue(chunked.isEmpty());
  for (int value = 0; value < count; value++) {
    chunked.add(value);
  }
  assertFalse(chunked.isEmpty());
  assertEquals(count, chunked.size());
  // 100k elements must have spilled across many chunks of the default size.
  assertTrue(chunked.getNumChunks() > 10);
  assertEquals(8192, chunked.getMaxChunkSize());
}
InternalCallVerifier EqualityVerifier
/** skip() must be capped by the declared size, not the underlying stream. */
@Test public void testBasicsSkip() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("hello"), 3);
  assertEquals(3, in.available());
  // Only 3 bytes are exposed, so successive skip(2) calls yield 2, 1, then 0.
  assertEquals(2, in.skip(2));
  assertEquals(1, in.skip(2));
  assertEquals(0, in.skip(2));
}
InternalCallVerifier EqualityVerifier
/** Array reads must be truncated at the declared size and then return -1. */
@Test public void testBasicsReadArray() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("hello"), 3);
  assertEquals(3, in.available());
  byte[] scratch = new byte[10];
  // First read returns the full 2 bytes requested.
  assertEquals(2, in.read(scratch, 0, 2));
  assertEquals('h', scratch[0]);
  assertEquals('e', scratch[1]);
  // Only one byte remains of the declared 3.
  assertEquals(1, in.read(scratch, 0, 2));
  assertEquals('l', scratch[0]);
  // Past the declared size: EOF.
  assertEquals(-1, in.read(scratch, 0, 2));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * When the underlying stream holds fewer bytes than the declared size,
 * reading past the real end must raise EOFException rather than return -1.
 */
@Test public void testReadNotEnough() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("he"), 5);
  assertEquals(2, in.available());
  assertEquals((int) 'h', in.read());
  assertEquals((int) 'e', in.read());
  try {
    in.read();
    fail("Read when should be out of data");
  } catch (EOFException e) {
    // expected: the stream promised 5 bytes but only 2 exist
  }
}
InternalCallVerifier EqualityVerifier
/** Single-byte reads stop at the declared size even if more data exists. */
@Test public void testBasicsReadSingle() throws IOException {
  ExactSizeInputStream in = new ExactSizeInputStream(byteStream("hello"), 3);
  assertEquals(3, in.available());
  assertEquals((int) 'h', in.read());
  assertEquals((int) 'e', in.read());
  assertEquals((int) 'l', in.read());
  // The declared 3 bytes are exhausted: EOF and nothing available.
  assertEquals(-1, in.read());
  assertEquals(0, in.available());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies LightWeightHashSet capacity management: minimum capacity,
 * rounding up to powers of two, expansion under load, and shrinking once
 * enough elements are removed.
 */
@Test public void testCapacity(){
LOG.info("Test capacity");
float maxF=LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR;
float minF=LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR;
// Requested capacities are rounded up to at least MINIMUM_CAPACITY...
set=new LightWeightHashSet(1,maxF,minF);
assertEquals(LightWeightHashSet.MINIMUM_CAPACITY,set.getCapacity());
// ...and to the next power of two.
set=new LightWeightHashSet(30,maxF,minF);
assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY,32),set.getCapacity());
set=new LightWeightHashSet(64,maxF,minF);
assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY,64),set.getCapacity());
set.addAll(list);
// Expected capacity doubles until NUM elements fit under the max load factor.
int expCap=LightWeightHashSet.MINIMUM_CAPACITY;
while (expCap < NUM && maxF * expCap < NUM) expCap<<=1;
assertEquals(expCap,set.getCapacity());
set.clear();
set.addAll(list);
// Remove just enough elements to drop below the min load factor...
int toRemove=set.size() - (int)(set.getCapacity() * minF) + 1;
for (int i=0; i < toRemove; i++) {
set.remove(list.get(i));
}
// ...which should halve the capacity (bounded below by MINIMUM_CAPACITY).
assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY,expCap / 2),set.getCapacity());
LOG.info("Test capacity - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** pollN must remove exactly the requested number of elements (or all that remain). */
@Test public void testPollNMulti() {
  LOG.info("Test pollN multi");
  set.addAll(list);
  // pollN(0) removes nothing.
  List drained = set.pollN(0);
  assertEquals(0, drained.size());
  for (Integer value : list) {
    assertTrue(set.contains(value));
  }
  // pollN(10) removes exactly 10 elements.
  drained = set.pollN(10);
  assertEquals(10, drained.size());
  for (Integer value : drained) {
    assertTrue(list.contains(value));
    assertFalse(set.contains(value));
  }
  // Asking for more than remains simply drains the rest.
  drained = set.pollN(1000);
  assertEquals(NUM - 10, drained.size());
  for (Integer value : drained) {
    assertTrue(list.contains(value));
  }
  assertTrue(set.isEmpty());
  assertEquals(0, set.size());
  LOG.info("Test pollN multi - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Removing the single element must empty the set; re-adding must work again. */
@Test public void testRemoveOne() {
  LOG.info("Test remove one");
  assertTrue(set.add(list.get(0)));
  assertEquals(1, set.size());
  assertTrue(set.remove(list.get(0)));
  assertEquals(0, set.size());
  Iterator emptied = set.iterator();
  assertFalse(emptied.hasNext());
  // The element can be re-added after removal.
  assertTrue(set.add(list.get(0)));
  assertEquals(1, set.size());
  Iterator refilled = set.iterator();
  assertTrue(refilled.hasNext());
  LOG.info("Test remove one - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Exercises addAll/removeAll/containsAll/toArray against the reference list. */
@Test public void testOther() {
  LOG.info("Test other");
  // Adding then removing everything leaves the set empty.
  assertTrue(set.addAll(list));
  assertTrue(set.removeAll(list));
  assertTrue(set.isEmpty());
  // Remove only the first ten elements.
  List firstTen = new LinkedList();
  for (int i = 0; i < 10; i++) {
    firstTen.add(list.get(i));
  }
  assertTrue(set.addAll(list));
  assertTrue(set.removeAll(firstTen));
  assertFalse(set.isEmpty());
  assertEquals(NUM - 10, set.size());
  for (Integer removed : firstTen) {
    assertFalse(set.contains(removed));
  }
  assertFalse(set.containsAll(firstTen));
  // The remaining NUM-10 elements must all still be present.
  List remainder = new LinkedList();
  for (int i = 10; i < NUM; i++) {
    remainder.add(list.get(i));
  }
  assertTrue(set.containsAll(remainder));
  // toArray(T[]) and toArray() must both reflect exactly the remainder,
  // without modifying the set.
  Integer[] typedArray = set.toArray(new Integer[0]);
  assertEquals(NUM - 10, typedArray.length);
  for (int i = 0; i < typedArray.length; i++) {
    assertTrue(remainder.contains(typedArray[i]));
  }
  assertEquals(NUM - 10, set.size());
  Object[] rawArray = set.toArray();
  assertEquals(NUM - 10, rawArray.length);
  for (int i = 0; i < rawArray.length; i++) {
    assertTrue(remainder.contains(rawArray[i]));
  }
  LOG.info("Test other - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * pollToArray must drain according to the supplied array's length: a smaller
 * array drains that many elements, an exact-size array drains everything,
 * and a zero-length array drains nothing.
 */
@Test public void testPollNMultiArray(){
LOG.info("Test pollN multi array");
set.addAll(list);
// A 10-slot array drains exactly 10 elements.
Integer[] poll=new Integer[10];
poll=set.pollToArray(poll);
assertEquals(10,poll.length);
for ( Integer i : poll) {
assertTrue(list.contains(i));
assertFalse(set.contains(i));
}
// A NUM-slot array drains the remaining NUM-10 elements.
poll=new Integer[NUM];
poll=set.pollToArray(poll);
assertEquals(NUM - 10,poll.length);
for (int i=0; i < NUM - 10; i++) {
assertTrue(list.contains(poll[i]));
}
assertTrue(set.isEmpty());
assertEquals(0,set.size());
// Refill and drain everything in a single call.
set.addAll(list);
poll=new Integer[NUM];
poll=set.pollToArray(poll);
assertTrue(set.isEmpty());
assertEquals(0,set.size());
assertEquals(NUM,poll.length);
for (int i=0; i < NUM; i++) {
assertTrue(list.contains(poll[i]));
}
// A zero-length array must leave the set untouched.
set.addAll(list);
poll=new Integer[0];
poll=set.pollToArray(poll);
for (int i=0; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
assertEquals(0,poll.length);
LOG.info("Test pollN multi array- DONE");
}
InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier
/** getElement must return the stored instance (identity), not merely an equal one. */
@Test public void testGetElement() {
  LightWeightHashSet objSet = new LightWeightHashSet();
  TestObject stored = new TestObject("object A");
  TestObject lookalike = new TestObject("object A");
  TestObject other = new TestObject("object B");
  objSet.add(stored);
  objSet.add(other);
  assertSame(stored, objSet.getElement(stored));
  // Looking up an equal-but-distinct key returns the originally stored instance.
  assertSame(stored, objSet.getElement(lookalike));
  assertSame(other, objSet.getElement(other));
  // Absent keys yield null.
  assertNull(objSet.getElement(new TestObject("not in set")));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * pollAll must drain the set completely and return every element that was
 * present; afterwards the set is empty and its iterator exhausted.
 */
@Test public void testPollAll(){
  LOG.info("Test poll all");
  for (int i=0; i < list.size(); i++) {
    assertTrue(set.add(list.get(i)));
  }
  List drained=set.pollAll();
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  // Nothing polled may remain in the set.
  for ( Integer value : list) {
    assertFalse(set.contains(value));
  }
  // Everything returned must have come from the original population.
  for ( Object value : drained) {
    assertTrue(list.contains(value));
  }
  assertFalse(set.iterator().hasNext());
  LOG.info("Test poll all - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A freshly created set reports empty via iterator, size and isEmpty alike. */
@Test public void testEmptyBasic(){
  LOG.info("Test empty basic");
  assertFalse(set.iterator().hasNext());
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  LOG.info("Test empty - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Removing the first half of the elements must leave exactly the second half
 * present.
 */
@Test public void testRemoveMulti(){
  LOG.info("Test remove multi");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  final int half=NUM / 2;
  for (int i=0; i < half; i++) {
    assertTrue(set.remove(list.get(i)));
  }
  for (int i=0; i < half; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  for (int i=half; i < NUM; i++) {
    assertTrue(set.contains(list.get(i)));
  }
  LOG.info("Test remove multi - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A single-element set iterates over exactly that element and nothing more. */
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertFalse(set.isEmpty());
  assertEquals(1,set.size());
  Iterator it=set.iterator();
  assertTrue(it.hasNext());
  assertEquals(list.get(0),it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/** Removing every element one by one must leave a fully empty set. */
@Test public void testRemoveAll(){
  LOG.info("Test remove all");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  for ( Integer value : list) {
    assertTrue(set.remove(value));
  }
  for ( Integer value : list) {
    assertFalse(set.contains(value));
  }
  assertFalse(set.iterator().hasNext());
  assertTrue(set.isEmpty());
  LOG.info("Test remove all - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** clear() must take a populated set back to the pristine empty state. */
@Test public void testClear(){
  LOG.info("Test clear");
  set.addAll(list);
  assertFalse(set.isEmpty());
  assertEquals(NUM,set.size());
  set.clear();
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  assertFalse(set.iterator().hasNext());
  LOG.info("Test clear - DONE");
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Bulk add/contains semantics: every element is added once, duplicates are
 * rejected, and iteration visits each member exactly once.
 */
@Test public void testMultiBasic(){
  LOG.info("Test multi element basic");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  assertEquals(list.size(),set.size());
  for ( Integer value : list) {
    assertTrue(set.contains(value));
  }
  // Re-adding an existing element must be rejected without disturbing membership.
  for ( Integer value : list) {
    assertFalse(set.add(value));
  }
  for ( Integer value : list) {
    assertTrue(set.contains(value));
  }
  int seen=0;
  for (Iterator it=set.iterator(); it.hasNext(); seen++) {
    Object next=it.next();
    assertNotNull(next);
    assertTrue(list.contains(next));
  }
  assertEquals(list.size(),seen);
  LOG.info("Test multi element basic - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies pollFirst ordering: elements come out in insertion order, polled
 * elements vanish from the set, and re-added elements queue behind the
 * surviving ones.
 */
@Test public void testPollMulti(){
LOG.info("Test poll multi");
for ( Integer i : list) {
assertTrue(set.add(i));
}
// Poll the first half; pollFirst must follow insertion order exactly.
for (int i=0; i < NUM / 2; i++) {
assertEquals(list.get(i),set.pollFirst());
}
assertEquals(NUM / 2,set.size());
for (int i=0; i < NUM / 2; i++) {
assertFalse(set.contains(list.get(i)));
}
for (int i=NUM / 2; i < NUM; i++) {
assertTrue(set.contains(list.get(i)));
}
// The iterator now starts at the first surviving (second-half) element.
Iterator iter=set.iterator();
int num=NUM / 2;
while (iter.hasNext()) {
assertEquals(list.get(num++),iter.next());
}
assertEquals(num,NUM);
// Re-add the polled half: those elements now sit after the survivors, so
// further polls yield the second half first, then the re-added first half.
for (int i=0; i < NUM / 2; i++) {
assertTrue(set.add(list.get(i)));
}
assertEquals(NUM,set.size());
for (int i=NUM / 2; i < NUM; i++) {
assertEquals(list.get(i),set.pollFirst());
}
for (int i=0; i < NUM / 2; i++) {
assertEquals(list.get(i),set.pollFirst());
}
assertEquals(0,set.size());
assertTrue(set.isEmpty());
LOG.info("Test poll multi - DONE");
}
InternalCallVerifier EqualityVerifier
/** resetBookmark must rewind the bookmark iterator back to the head element. */
@Test(timeout=60000) public void testResetBookmarkPlacesBookmarkAtHead(){
  set.addAll(list);
  Iterator bookmark=set.getBookmark();
  final int numAdvance=set.size() / 2;
  // Walk the bookmark halfway into the set.
  for (int skipped=0; skipped < numAdvance; skipped++) {
    bookmark.next();
  }
  assertEquals(bookmark.next(),list.get(numAdvance));
  // After a reset the bookmark must start from the head again.
  set.resetBookmark();
  bookmark=set.getBookmark();
  assertEquals(bookmark.next(),list.get(0));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Adding to a previously empty set must place the bookmark at the new head,
 * so the bookmark iterator yields all elements in insertion order.
 */
@Test(timeout=60000) public void testBookmarkSetToHeadOnAddToEmpty(){
  LOG.info("Test bookmark is set after adding to previously empty set.");
  assertFalse(set.getBookmark().hasNext());
  set.add(list.get(0));
  set.add(list.get(1));
  Iterator bookmark=set.getBookmark();
  assertTrue(bookmark.hasNext());
  assertEquals(bookmark.next(),list.get(0));
  assertEquals(bookmark.next(),list.get(1));
  assertFalse(bookmark.hasNext());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getBookmark must return an iterator that resumes from the bookmark
 * position, not a fresh head iterator.
 */
@Test(timeout=60000) public void testGetBookmarkReturnsBookmarkIterator(){
  LOG.info("Test getBookmark returns proper iterator");
  assertTrue(set.addAll(list));
  Iterator first=set.getBookmark();
  assertEquals(first.next(),list.get(0));
  final int numAdvance=list.size() / 2;
  // Consume numAdvance elements in total (one was already taken above).
  for (int i=1; i < numAdvance; i++) {
    first.next();
  }
  // A second bookmark picks up where the first left off.
  Iterator second=set.getBookmark();
  assertEquals(second.next(),list.get(numAdvance));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Removing the first half must leave the second half present, and the
 * surviving tail must still iterate in insertion order.
 */
@Test public void testRemoveMulti(){
  LOG.info("Test remove multi");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  final int half=NUM / 2;
  for (int i=0; i < half; i++) {
    assertTrue(set.remove(list.get(i)));
  }
  for (int i=0; i < half; i++) {
    assertFalse(set.contains(list.get(i)));
  }
  for (int i=half; i < NUM; i++) {
    assertTrue(set.contains(list.get(i)));
  }
  // Iteration over the survivors must follow the original insertion order.
  int expected=half;
  for (Iterator it=set.iterator(); it.hasNext(); ) {
    assertEquals(list.get(expected++),it.next());
  }
  assertEquals(expected,NUM);
  LOG.info("Test remove multi - DONE");
}
InternalCallVerifier EqualityVerifier
/** pollN asked for more elements than exist must return just the one present. */
@Test public void testPollNOne(){
  LOG.info("Test pollN one");
  set.add(list.get(0));
  List polled=set.pollN(10);
  assertEquals(1,polled.size());
  assertEquals(list.get(0),polled.get(0));
  LOG.info("Test pollN one - DONE");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/** An empty set: no iteration, zero size, and every poll variant is a no-op. */
@Test public void testEmptyBasic(){
  LOG.info("Test empty basic");
  assertFalse(set.iterator().hasNext());
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  // All three poll flavors must come back empty/null on an empty set.
  assertNull(set.pollFirst());
  assertEquals(0,set.pollAll().size());
  assertEquals(0,set.pollN(10).size());
  LOG.info("Test empty - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Bulk add/contains semantics for the linked variant: duplicates rejected,
 * and iteration order must match insertion order exactly.
 */
@Test public void testMultiBasic(){
  LOG.info("Test multi element basic");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  assertEquals(list.size(),set.size());
  for ( Integer value : list) {
    assertTrue(set.contains(value));
  }
  // Duplicates must be rejected without disturbing membership.
  for ( Integer value : list) {
    assertFalse(set.add(value));
  }
  for ( Integer value : list) {
    assertTrue(set.contains(value));
  }
  int index=0;
  for (Iterator it=set.iterator(); it.hasNext(); ) {
    assertEquals(list.get(index++),it.next());
  }
  assertEquals(list.size(),index);
  LOG.info("Test multi element basic - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Removing the element the bookmark currently rests on must advance the
 * bookmark to the next element rather than leave it dangling.
 */
@Test(timeout=60000) public void testBookmarkAdvancesOnRemoveOfSameElement(){
  LOG.info("Test that the bookmark advances if we remove its element.");
  assertTrue(set.add(list.get(0)));
  assertTrue(set.add(list.get(1)));
  assertTrue(set.add(list.get(2)));
  Iterator bookmark=set.getBookmark();
  assertEquals(bookmark.next(),list.get(0));
  // The bookmark now rests on element 1; deleting it should push it to element 2.
  set.remove(list.get(1));
  bookmark=set.getBookmark();
  assertEquals(bookmark.next(),list.get(2));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** Polling a one-element set yields that element, then null on the empty set. */
@Test public void testPollOneElement(){
  LOG.info("Test poll one element");
  set.add(list.get(0));
  assertEquals(list.get(0),set.pollFirst());
  assertNull(set.pollFirst());
  LOG.info("Test poll one element - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Draining via repeated pollFirst must leave the set completely empty. */
@Test public void testPollAll(){
  LOG.info("Test poll all");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  // Poll until exhaustion signals with a null return.
  while (set.pollFirst() != null) {
  }
  assertEquals(0,set.size());
  assertTrue(set.isEmpty());
  for ( Integer value : list) {
    assertFalse(set.contains(value));
  }
  assertFalse(set.iterator().hasNext());
  LOG.info("Test poll all - DONE");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Add/remove a single element: afterwards the set behaves exactly like a
 * fresh empty set, and the element can be re-inserted.
 */
@Test public void testRemoveOne(){
  LOG.info("Test remove one");
  assertTrue(set.add(list.get(0)));
  assertEquals(1,set.size());
  assertTrue(set.remove(list.get(0)));
  assertEquals(0,set.size());
  assertFalse(set.iterator().hasNext());
  // Every poll variant must behave as on a brand-new empty set.
  assertNull(set.pollFirst());
  assertEquals(0,set.pollAll().size());
  assertEquals(0,set.pollN(10).size());
  // The removed element must be insertable again.
  assertTrue(set.add(list.get(0)));
  assertEquals(1,set.size());
  assertTrue(set.iterator().hasNext());
  LOG.info("Test remove one - DONE");
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * clear() must empty the set, reset the bookmark to an exhausted state, and
 * leave every poll variant behaving as on a brand-new set.
 */
@Test public void testClear(){
LOG.info("Test clear");
set.addAll(list);
assertEquals(NUM,set.size());
assertFalse(set.isEmpty());
// Advance the bookmark past the midpoint so we can observe clear() resetting it.
Iterator bkmrkIt=set.getBookmark();
for (int i=0; i < set.size() / 2 + 1; i++) {
bkmrkIt.next();
}
assertTrue(bkmrkIt.hasNext());
set.clear();
assertEquals(0,set.size());
assertTrue(set.isEmpty());
// After clear() the bookmark must be exhausted, not pointing at stale nodes.
bkmrkIt=set.getBookmark();
assertFalse(bkmrkIt.hasNext());
assertEquals(0,set.pollAll().size());
assertEquals(0,set.pollN(10).size());
assertNull(set.pollFirst());
Iterator iter=set.iterator();
assertFalse(iter.hasNext());
LOG.info("Test clear - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/** Element-by-element removal must leave the linked set fully empty. */
@Test public void testRemoveAll(){
  LOG.info("Test remove all");
  for ( Integer value : list) {
    assertTrue(set.add(value));
  }
  for ( Integer value : list) {
    assertTrue(set.remove(value));
  }
  for ( Integer value : list) {
    assertFalse(set.contains(value));
  }
  assertFalse(set.iterator().hasNext());
  assertTrue(set.isEmpty());
  LOG.info("Test remove all - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * toArray — both the typed and the Object[] overloads — must reflect exactly
 * the current contents of the set without modifying it.
 */
@Test public void testOther(){
  LOG.info("Test other");
  assertTrue(set.addAll(list));
  Integer[] array=set.toArray(new Integer[0]);
  assertEquals(NUM,array.length);
  for (int i=0; i < array.length; i++) {
    assertTrue(list.contains(array[i]));
  }
  // toArray must not have drained the set.
  assertEquals(NUM,set.size());
  Object[] array2=set.toArray();
  assertEquals(NUM,array2.length);
  for (int i=0; i < array2.length; i++) {
    assertTrue(list.contains(array2[i]));
  }
  // Fixed: previously logged "Test capacity - DONE", inconsistent with the
  // "Test other" entry log and with the sibling testOther variant.
  LOG.info("Test other - DONE");
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A one-element linked set iterates over exactly that element. */
@Test public void testOneElementBasic(){
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertEquals(1,set.size());
  assertFalse(set.isEmpty());
  Iterator it=set.iterator();
  assertTrue(it.hasNext());
  assertEquals(list.get(0),it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * pollN in two batches: the first takes exactly 10 elements in insertion
 * order; the second, over-asking, returns just the remainder.
 */
@Test public void testPollNMulti(){
  LOG.info("Test pollN multi");
  set.addAll(list);
  // First batch: exactly 10 elements, in insertion order.
  List firstBatch=set.pollN(10);
  assertEquals(10,firstBatch.size());
  for (int i=0; i < 10; i++) {
    assertEquals(list.get(i),firstBatch.get(i));
  }
  // Second batch: asking for far more than remain returns only the rest.
  List remainder=set.pollN(1000);
  assertEquals(NUM - 10,remainder.size());
  for (int i=10; i < NUM; i++) {
    assertEquals(list.get(i),remainder.get(i - 10));
  }
  assertTrue(set.isEmpty());
  assertEquals(0,set.size());
  LOG.info("Test pollN multi - DONE");
}
InternalCallVerifier EqualityVerifier
/** With nothing configured, anonymous simple-auth access defaults to enabled. */
@Test public void testGetSimpleAuthDefaultConfiguration() throws ServletException {
  AuthFilter filter=new AuthFilter();
  FilterConfig emptyConfig=new DummyFilterConfig(new HashMap());
  Properties props=filter.getConfiguration("random",emptyConfig);
  Assert.assertEquals("true",props.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
InternalCallVerifier EqualityVerifier
/**
 * Kerberos principal/keytab settings must be translated into the
 * authentication handler's property names, with anonymous still allowed.
 */
@Test public void testGetConfiguration() throws ServletException {
  Map settings=new HashMap();
  settings.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,"xyz/thehost@REALM");
  settings.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,"thekeytab");
  AuthFilter filter=new AuthFilter();
  Properties props=filter.getConfiguration("random",new DummyFilterConfig(settings));
  Assert.assertEquals("xyz/thehost@REALM",props.getProperty("kerberos.principal"));
  Assert.assertEquals("thekeytab",props.getProperty("kerberos.keytab"));
  Assert.assertEquals("true",props.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
InternalCallVerifier EqualityVerifier
/** Explicitly disabling anonymous access must be reflected in the properties. */
@Test public void testGetSimpleAuthDisabledConfiguration() throws ServletException {
  Map settings=new HashMap();
  settings.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED,"false");
  AuthFilter filter=new AuthFilter();
  Properties props=filter.getConfiguration("random",new DummyFilterConfig(settings));
  Assert.assertEquals("false",props.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies ByteRangeInputStream seek/read behavior against mocked URL
 * openers: which offsets new connections are opened at, when an existing
 * connection is reused, and that a missing Content-Length header turns into
 * an IOException on read.
 */
@Test public void testByteRange() throws IOException {
ByteRangeInputStream.URLOpener oMock=getMockURLOpener(new URL("http://test"));
ByteRangeInputStream.URLOpener rMock=getMockURLOpener(null);
ByteRangeInputStream bris=new ByteRangeInputStreamImpl(oMock,rMock);
bris.seek(0);
assertEquals("getPos wrong",0,bris.getPos());
bris.read();
assertEquals("Initial call made incorrectly (offset check)",0,bris.startPos);
assertEquals("getPos should return 1 after reading one byte",1,bris.getPos());
verify(oMock,times(1)).connect(0,false);
// A second sequential read must reuse the open connection (still one connect).
bris.read();
assertEquals("getPos should return 2 after reading two bytes",2,bris.getPos());
verify(oMock,times(1)).connect(0,false);
// Seeking to a new offset forces a fresh connection via the resolved URL.
rMock.setURL(new URL("http://resolvedurl/"));
bris.seek(100);
bris.read();
assertEquals("Seek to 100 bytes made incorrectly (offset Check)",100,bris.startPos);
assertEquals("getPos should return 101 after reading one byte",101,bris.getPos());
verify(rMock,times(1)).connect(100,true);
// Seeking to the current position must not open another connection.
bris.seek(101);
bris.read();
verify(rMock,times(1)).connect(100,true);
verify(rMock,times(0)).connect(101,true);
bris.seek(2500);
bris.read();
assertEquals("Seek to 2500 bytes made incorrectly (offset Check)",2500,bris.startPos);
// A connection that reports no Content-Length must fail the next read.
doReturn(getMockConnection(null)).when(rMock).connect(anyLong(),anyBoolean());
bris.seek(500);
try {
bris.read();
fail("Exception should be thrown when content-length is not given");
}
catch ( IOException e) {
assertTrue("Incorrect response message: " + e.getMessage(),e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH + " is missing: "));
}
bris.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Round-trips a single byte through the "swebhdfs" (SSL WebHDFS) scheme. */
@Test public void testSWebHdfsFileSystem() throws Exception {
  FileSystem swebFs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,"swebhdfs");
  final Path target=new Path("/testswebhdfs");
  FSDataOutputStream out=swebFs.create(target);
  out.write(23);
  out.close();
  Assert.assertTrue(swebFs.exists(target));
  InputStream in=swebFs.open(target);
  Assert.assertEquals(23,in.read());
  in.close();
  swebFs.close();
}
InternalCallVerifier EqualityVerifier
/** An AclStatus built programmatically must serialize to the expected JSON. */
@Test public void testToJsonFromAclStatus(){
  String expectedJson="{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
  List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"user1",ALL),aclEntry(ACCESS,GROUP,READ_WRITE));
  AclStatus.Builder builder=new AclStatus.Builder();
  builder.owner("testuser");
  builder.group("supergroup");
  builder.stickyBit(false);
  builder.addEntries(aclSpec);
  Assert.assertEquals(expectedJson,JsonUtil.toJsonString(builder.build()));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a DatanodeInfo through JsonUtil using the legacy "name"
 * (ip:xferPort) field, then checks that malformed or missing names make the
 * decoder fail.
 */
@Test public void testToDatanodeInfoWithName() throws Exception {
Map response=new HashMap();
// Minimal property map mimicking a datanode JSON response.
String name="127.0.0.1:1004";
response.put("name",name);
response.put("hostName","localhost");
response.put("storageID","fake-id");
response.put("infoPort",1338l);
response.put("ipcPort",1339l);
response.put("capacity",1024l);
response.put("dfsUsed",512l);
response.put("remaining",512l);
response.put("blockPoolUsed",512l);
response.put("lastUpdate",0l);
response.put("xceiverCount",4096l);
response.put("networkLocation","foo.bar.baz");
response.put("adminState","NORMAL");
response.put("cacheCapacity",123l);
response.put("cacheUsed",321l);
// Decoding must split "name" into ipAddr and xferPort, and re-encoding must
// reproduce all three fields.
DatanodeInfo di=JsonUtil.toDatanodeInfo(response);
Assert.assertEquals(name,di.getXferAddr());
Map r=JsonUtil.toJsonMap(di);
Assert.assertEquals(name,r.get("name"));
Assert.assertEquals("127.0.0.1",r.get("ipAddr"));
Assert.assertEquals(1004,(int)(Integer)r.get("xferPort"));
// Malformed host:port combinations must be rejected by the decoder.
String[] badNames={"127.0.0.1","127.0.0.1:",":","127.0.0.1:sweet",":123"};
for ( String badName : badNames) {
response.put("name",badName);
checkDecodeFailure(response);
}
// Decoding must also fail with "name" absent, even when "ipAddr" alone is
// supplied afterwards.
response.remove("name");
checkDecodeFailure(response);
response.put("ipAddr","127.0.0.1");
checkDecodeFailure(response);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testToAclStatus(){
String jsonString="{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}";
Map,?> json=(Map,?>)JSON.parse(jsonString);
List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"user1",READ_WRITE),aclEntry(ACCESS,GROUP,READ_WRITE),aclEntry(ACCESS,OTHER,READ_EXECUTE));
AclStatus.Builder aclStatusBuilder=new AclStatus.Builder();
aclStatusBuilder.owner("testuser");
aclStatusBuilder.group("supergroup");
aclStatusBuilder.addEntries(aclSpec);
aclStatusBuilder.stickyBit(false);
Assert.assertEquals("Should be equal",aclStatusBuilder.build(),JsonUtil.toAclStatus(json));
}
InternalCallVerifier NullVerifier
/**
 * ensureTokenInitialized on a filesystem with no pre-existing token must
 * fetch a delegation token, install it via setDelegationToken, and create
 * both the renewer and the renew action.
 */
@Test public void testGetRemoteToken() throws IOException, URISyntaxException {
Configuration conf=new Configuration();
DummyFs fs=spy(new DummyFs());
Token token=new Token(new byte[0],new byte[0],DummyFs.TOKEN_KIND,new Text("127.0.0.1:1234"));
// Both the remote fetch and the renew-token lookup hand back the same token.
doReturn(token).when(fs).getDelegationToken(anyString());
doReturn(token).when(fs).getRenewToken();
fs.initialize(new URI("dummyfs://127.0.0.1:1234"),conf);
fs.tokenAspect.ensureTokenInitialized();
// The fetched token must be installed on the filesystem...
verify(fs).setDelegationToken(token);
// ...and the aspect must have spun up its renewer machinery.
assertNotNull(Whitebox.getInternalState(fs.tokenAspect,"dtRenewer"));
assertNotNull(Whitebox.getInternalState(fs.tokenAspect,"action"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
/**
 * Token renewal lifecycle: a renew action whose renew() fails becomes
 * invalid after a renew cycle, and the next ensureTokenInitialized() fetches
 * a fresh token and installs a new, valid action.
 */
@Test public void testRenewal() throws Exception {
  Configuration conf=new Configuration();
  // Fixed: "Token>" and "DelegationTokenRenewer.RenewAction>" were malformed
  // generic declarations that do not compile; wildcards are intended.
  Token<?> token1=mock(Token.class);
  Token<?> token2=mock(Token.class);
  final long renewCycle=100;
  DelegationTokenRenewer.renewCycle=renewCycle;
  UserGroupInformation ugi=UserGroupInformation.createUserForTesting("foo",new String[]{"bar"});
  DummyFs fs=spy(new DummyFs());
  // First fetch hands out token1, the second one token2.
  doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
  doReturn(token1).when(fs).getRenewToken();
  // Renewing token1 always fails, so its action must be marked invalid.
  doThrow(new IOException("renew failed")).when(token1).renew(conf);
  doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,null);
  final URI uri=new URI("dummyfs://127.0.0.1:1234");
  TokenAspect tokenAspect=new TokenAspect(fs,SecurityUtil.buildTokenService(uri),DummyFs.TOKEN_KIND);
  fs.initialize(uri,conf);
  tokenAspect.initDelegationToken(ugi);
  tokenAspect.ensureTokenInitialized();
  DelegationTokenRenewer.RenewAction<?> action=getActionFromTokenAspect(tokenAspect);
  verify(fs).setDelegationToken(token1);
  assertTrue(action.isValid());
  // Wait at least one full cycle so the failing renew() invalidates the action.
  Thread.sleep(renewCycle * 2);
  assertSame(action,getActionFromTokenAspect(tokenAspect));
  assertFalse(action.isValid());
  // Re-initialization must fetch token2 and replace the dead action.
  tokenAspect.ensureTokenInitialized();
  verify(fs,times(2)).getDelegationToken(anyString());
  verify(fs).setDelegationToken(token2);
  assertNotSame(action,getActionFromTokenAspect(tokenAspect));
  action=getActionFromTokenAspect(tokenAspect);
  assertTrue(action.isValid());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When the UGI already holds a token of the matching kind, the aspect must
 * select and install it without fetching a remote token and without starting
 * the renewer.
 */
@Test public void testInitWithUGIToken() throws IOException, URISyntaxException {
Configuration conf=new Configuration();
DummyFs fs=spy(new DummyFs());
doReturn(null).when(fs).getDelegationToken(anyString());
Token token=new Token(new byte[0],new byte[0],DummyFs.TOKEN_KIND,new Text("127.0.0.1:1234"));
fs.ugi.addToken(token);
// A token of a different kind must be ignored during selection.
fs.ugi.addToken(new Token(new byte[0],new byte[0],new Text("Other token"),new Text("127.0.0.1:8021")));
assertEquals("wrong tokens in user",2,fs.ugi.getTokens().size());
fs.emulateSecurityEnabled=true;
fs.initialize(new URI("dummyfs://127.0.0.1:1234"),conf);
fs.tokenAspect.ensureTokenInitialized();
// The matching UGI token is installed; no remote fetch happens.
verify(fs).setDelegationToken(token);
verify(fs,never()).getDelegationToken(anyString());
// No renewer machinery is created for a UGI-provided token.
assertNull(Whitebox.getInternalState(fs.tokenAspect,"dtRenewer"));
assertNull(Whitebox.getInternalState(fs.tokenAspect,"action"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests snapshot deletion through WebHdfs: both a named snapshot ("s1") and
 * a default-named snapshot (created with a null name) are created, verified
 * to exist, deleted via WebHdfs, and verified to be gone.
 */
@Test public void testWebHdfsDeleteSnapshot() throws Exception {
MiniDFSCluster cluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path foo=new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo,"s1");
// A null name asks the server to generate a default snapshot name.
final Path spath=webHdfs.createSnapshot(foo,null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path=SnapshotTestHelper.getSnapshotRoot(foo,"s1");
Assert.assertTrue(webHdfs.exists(s1path));
// Delete both snapshots and confirm they disappear from WebHdfs' view.
webHdfs.deleteSnapshot(foo,"s1");
Assert.assertFalse(webHdfs.exists(s1path));
webHdfs.deleteSnapshot(foo,spath.getName());
Assert.assertFalse(webHdfs.exists(spath));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Lists a directory whose entry count (listLimit * 3) exceeds the configured
 * DFS_LIST_LIMIT, so listStatus must assemble the full result from partial
 * listings; runs as a non-superuser with a restrictive umask in effect.
 */
@Test(timeout=300000) public void testLargeDirectory() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
final int listLimit=2;
// Force the namenode to return directory listings in chunks of two.
conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT,listLimit);
FsPermission.setUMask(conf,new FsPermission((short)0077));
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
cluster.waitActive();
// Open "/" to everyone so the unprivileged test users below can write.
WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL));
UserGroupInformation.setLoginUser(UserGroupInformation.createUserForTesting("not-superuser",new String[]{"not-supergroup"}));
UserGroupInformation.createUserForTesting("me",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws IOException, URISyntaxException {
FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
Path d=new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
// Create more files than one listing chunk can hold.
for (int i=0; i < listLimit * 3; i++) {
Path p=new Path(d,"file-" + i);
Assert.assertTrue(fs.createNewFile(p));
}
// The aggregated listing must contain every file despite chunking.
Assert.assertEquals(listLimit * 3,fs.listStatus(d).length);
return null;
}
}
);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With a relaxed DFS_WEBHDFS_USER_PATTERN_KEY that permits a leading digit,
 * a purely numeric user name ("123") must be able to operate over WebHdfs.
 * NOTE(review): presumably the default pattern rejects such names — this
 * test only verifies acceptance under the relaxed pattern.
 */
@Test(timeout=300000) public void testNumericalUserName() throws Exception {
final Configuration conf=WebHdfsTestUtil.createConf();
conf.set(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,"^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
cluster.waitActive();
// Open "/" to everyone so the numeric test user can create directories.
WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL));
UserGroupInformation.createUserForTesting("123",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws IOException, URISyntaxException {
FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
Path d=new Path("/my-dir");
Assert.assertTrue(fs.mkdirs(d));
return null;
}
}
);
}
finally {
cluster.shutdown();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Tests snapshot creation through WebHdfs: creating a snapshot on a
 * non-snapshottable directory must fail with the expected message, while a
 * snapshottable directory accepts both a named and a default-named snapshot.
 */
@Test public void testWebHdfsCreateSnapshot() throws Exception {
MiniDFSCluster cluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path foo=new Path("/foo");
dfs.mkdirs(foo);
// Before allowSnapshot, snapshot creation must be rejected.
try {
webHdfs.createSnapshot(foo);
fail("Cannot create snapshot on a non-snapshottable directory");
}
catch ( Exception e) {
GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory",e);
}
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo,"s1");
// A null name asks the server to generate a default snapshot name.
final Path spath=webHdfs.createSnapshot(foo,null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path=SnapshotTestHelper.getSnapshotRoot(foo,"s1");
Assert.assertTrue(webHdfs.exists(s1path));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests snapshot rename through WebHdfs: after renaming "s1" to "s2" the old
 * snapshot path must vanish and the new one must exist; the renamed snapshot
 * is then deleted to confirm it is fully functional.
 */
@Test public void testWebHdfsRenameSnapshot() throws Exception {
MiniDFSCluster cluster=null;
final Configuration conf=WebHdfsTestUtil.createConf();
try {
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs=cluster.getFileSystem();
final FileSystem webHdfs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME);
final Path foo=new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo,"s1");
final Path s1path=SnapshotTestHelper.getSnapshotRoot(foo,"s1");
Assert.assertTrue(webHdfs.exists(s1path));
// Rename: old path gone, new path present.
webHdfs.renameSnapshot(foo,"s1","s2");
Assert.assertFalse(webHdfs.exists(s1path));
final Path s2path=SnapshotTestHelper.getSnapshotRoot(foo,"s2");
Assert.assertTrue(webHdfs.exists(s2path));
webHdfs.deleteSnapshot(foo,"s2");
Assert.assertFalse(webHdfs.exists(s2path));
}
finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * WebHDFS against an HA namenode pair: filesystem operations through the
 * logical URI must succeed both while namenode 0 is active and after a
 * failover to namenode 1.
 */
@Test public void testHA() throws IOException {
Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
MiniDFSCluster cluster=null;
FileSystem fs=null;
try {
cluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(0).build();
HATestUtil.setFailoverConfigurations(cluster,conf,LOGICAL_NAME);
cluster.waitActive();
fs=FileSystem.get(WEBHDFS_URI,conf);
cluster.transitionToActive(0);
final Path dir=new Path("/test");
Assert.assertTrue(fs.mkdirs(dir));
// Kill the active namenode and fail over; the client must follow.
cluster.shutdownNameNode(0);
cluster.transitionToActive(1);
final Path dir2=new Path("/test2");
Assert.assertTrue(fs.mkdirs(dir2));
}
finally {
IOUtils.cleanup(null,fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A path containing a literal percent-escape ("%2C") must survive WebHdfs
 * URL construction without being double-encoded or decoded.
 */
@Test(timeout=60000) public void testEncodedPathUrl() throws IOException, URISyntaxException {
  Configuration conf=new Configuration();
  final WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf);
  String pathName="/hdtest010%2C60020%2C1371000602151.1371058984668";
  URL encodedPathUrl=webhdfs.toUrl(PutOpParam.Op.CREATE,new Path(pathName));
  // The URL's path component must be the prefix plus the untouched path string.
  Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,encodedPathUrl.toURI().getPath());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Writes, reads and appends the same file through every WebHDFS client in
 * webhdfs[], verifying file length and byte-for-byte content after each
 * phase.
 * NOTE(review): named "testRedirect" — presumably each create/append/open
 * exercises the namenode-to-datanode redirect path; confirm against the
 * fixture setup.
 */
@Test public void testRedirect() throws Exception {
final String dir="/testRedirect/";
final String filename="file";
final Path p=new Path(dir,filename);
final String[] writeStrings=createStrings("write to webhdfs ","write");
final String[] appendStrings=createStrings("append to webhdfs ","append");
// Phase 1: each client creates the file with its own payload.
for (int i=0; i < webhdfs.length; i++) {
final FSDataOutputStream out=webhdfs[i].create(p);
out.write(writeStrings[i].getBytes());
out.close();
}
for (int i=0; i < webhdfs.length; i++) {
final long expected=writeStrings[i].length();
Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen());
}
// Phase 2: read back and compare byte by byte.
for (int i=0; i < webhdfs.length; i++) {
final FSDataInputStream in=webhdfs[i].open(p);
for (int c, j=0; (c=in.read()) != -1; j++) {
Assert.assertEquals(writeStrings[i].charAt(j),c);
}
in.close();
}
// Phase 3: append a second payload through each client.
for (int i=0; i < webhdfs.length; i++) {
final FSDataOutputStream out=webhdfs[i].append(p);
out.write(appendStrings[i].getBytes());
out.close();
}
for (int i=0; i < webhdfs.length; i++) {
final long expected=writeStrings[i].length() + appendStrings[i].length();
Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen());
}
// Phase 4: the full content must be the write payload followed by the append.
for (int i=0; i < webhdfs.length; i++) {
final StringBuilder b=new StringBuilder();
final FSDataInputStream in=webhdfs[i].open(p);
for (int c; (c=in.read()) != -1; ) {
b.append((char)c);
}
final int wlen=writeStrings[i].length();
Assert.assertEquals(writeStrings[i],b.substring(0,wlen));
Assert.assertEquals(appendStrings[i],b.substring(wlen));
in.close();
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * ReplicationParam: the default carries no explicit value and resolves from
 * the configuration; a non-positive replication factor is rejected.
 */
@Test public void testReplicationParam(){
  final ReplicationParam defaultParam=new ReplicationParam(ReplicationParam.DEFAULT);
  Assert.assertEquals(null,defaultParam.getValue());
  Assert.assertEquals((short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,DFSConfigKeys.DFS_REPLICATION_DEFAULT),defaultParam.getValue(conf));
  // A positive factor is accepted; zero must throw.
  new ReplicationParam((short)1);
  try {
    new ReplicationParam((short)0);
    Assert.fail();
  }
  catch ( IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * BufferSizeParam: the default carries no explicit value and resolves from
 * the configuration; a non-positive buffer size is rejected.
 */
@Test public void testBufferSizeParam(){
  final BufferSizeParam defaultParam=new BufferSizeParam(BufferSizeParam.DEFAULT);
  Assert.assertEquals(null,defaultParam.getValue());
  Assert.assertEquals(conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),defaultParam.getValue(conf));
  // A positive size is accepted; zero must throw.
  new BufferSizeParam(1);
  try {
    new BufferSizeParam(0);
    Assert.fail();
  }
  catch ( IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
InternalCallVerifier EqualityVerifier
/** Round-tripping a rename option set through its string form preserves it. */
@Test public void testRenameOptionSetParam(){
  final RenameOptionSetParam original=new RenameOptionSetParam(Options.Rename.OVERWRITE,Options.Rename.NONE);
  final RenameOptionSetParam roundTripped=new RenameOptionSetParam(original.getValueString());
  Assert.assertEquals(roundTripped.getValue(),EnumSet.of(Options.Rename.OVERWRITE,Options.Rename.NONE));
}
InternalCallVerifier EqualityVerifier
/** Round-tripping the xattr encoding through its string form preserves it. */
@Test public void testXAttrEncodingParam(){
  final XAttrEncodingParam original=new XAttrEncodingParam(XAttrCodec.BASE64);
  Assert.assertEquals(original.getEncoding(),XAttrCodec.BASE64);
  final XAttrEncodingParam roundTripped=new XAttrEncodingParam(original.getValueString());
  Assert.assertEquals(roundTripped.getEncoding(),XAttrCodec.BASE64);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * BlockSizeParam: the default carries no explicit value and resolves from
 * the configuration; a non-positive block size is rejected.
 */
@Test public void testBlockSizeParam(){
  final BlockSizeParam defaultParam=new BlockSizeParam(BlockSizeParam.DEFAULT);
  Assert.assertEquals(null,defaultParam.getValue());
  Assert.assertEquals(conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),defaultParam.getValue(conf));
  // A positive size is accepted; zero must throw.
  new BlockSizeParam(1L);
  try {
    new BlockSizeParam(0L);
    Assert.fail();
  }
  catch ( IllegalArgumentException e) {
    LOG.info("EXPECTED: " + e);
  }
}
InternalCallVerifier EqualityVerifier
/** Round-tripping the xattr set-flag set through its string form preserves it. */
@Test public void testXAttrSetFlagParam(){
  EnumSet flag=EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE);
  final XAttrSetFlagParam original=new XAttrSetFlagParam(flag);
  Assert.assertEquals(original.getFlag(),flag);
  final XAttrSetFlagParam roundTripped=new XAttrSetFlagParam(original.getValueString());
  Assert.assertEquals(roundTripped.getFlag(),flag);
}
InternalCallVerifier EqualityVerifier
@Test public void testRequestQuoting() throws Exception {
HttpServletRequest mockReq=Mockito.mock(HttpServletRequest.class);
HttpServer2.QuotingInputFilter.RequestQuoter quoter=new HttpServer2.QuotingInputFilter.RequestQuoter(mockReq);
Mockito.doReturn("a
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testHttpsCookie() throws IOException, GeneralSecurityException {
  // Hit /echo over HTTPS and inspect the session cookie the server sets.
  URL base=new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(1)));
  HttpsURLConnection conn=(HttpsURLConnection)new URL(base,"/echo").openConnection();
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());
  String header=conn.getHeaderField("Set-Cookie");
  // Parameterized list: with the raw List, get(0) returned Object and the
  // HttpCookie accessor calls below did not compile.
  List<HttpCookie> cookies=HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  // Over HTTPS the cookie must be HttpOnly and carry the Secure flag.
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue(cookies.get(0).getSecure());
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testHttpCookie() throws IOException {
  // Hit /echo over plain HTTP and inspect the session cookie.
  URL base=new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0)));
  HttpURLConnection conn=(HttpURLConnection)new URL(base,"/echo").openConnection();
  String header=conn.getHeaderField("Set-Cookie");
  // Parameterized list: with the raw List, get(0) returned Object and
  // getValue() did not compile.
  List<HttpCookie> cookies=HttpCookie.parse(header);
  Assert.assertTrue(!cookies.isEmpty());
  // The cookie must be HttpOnly; no Secure flag is expected over HTTP.
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue("token".equals(cookies.get(0).getValue()));
}
InternalCallVerifier EqualityVerifier
@Test public void testParameterPropagation(){
  // Values passed to the appender's setters must be visible via the getters.
  final String logName="jetty-namenode-yyyy_mm_dd.log";
  HttpRequestLogAppender appender=new HttpRequestLogAppender();
  appender.setFilename(logName);
  appender.setRetainDays(17);
  assertEquals("Filename mismatch",logName,appender.getFilename());
  assertEquals("Retain days mismatch",17,appender.getRetainDays());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify the administrator access for /logs, /stacks, /conf, /logLevel and
 * /metrics servlets.
 * @throws Exception on any server or HTTP failure
 */
@Test public void testAuthorizationOfDefaultServlets() throws Exception {
// Enable security authorization and require admin access for the
// instrumentation servlets.
Configuration conf=new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,true);
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,DummyFilterInitializer.class.getName());
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MyGroupsProvider.class.getName());
Groups.getUserToGroupsMappingService(conf);
// Static user -> group mapping consulted by the ACL check below.
MyGroupsProvider.clearMapping();
MyGroupsProvider.mapping.put("userA",Arrays.asList("groupA"));
MyGroupsProvider.mapping.put("userB",Arrays.asList("groupB"));
MyGroupsProvider.mapping.put("userC",Arrays.asList("groupC"));
MyGroupsProvider.mapping.put("userD",Arrays.asList("groupD"));
MyGroupsProvider.mapping.put("userE",Arrays.asList("groupE"));
// ACL admits userA/userB by name and groupC/groupD by group; userE
// (only in groupE) matches neither and should be rejected.
HttpServer2 myServer=new HttpServer2.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).setACL(new AccessControlList("userA,userB groupC,groupD")).build();
myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,conf);
myServer.start();
String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
// Admitted users get 200 on every default servlet; userE gets 403.
for ( String servlet : new String[]{"conf","logs","stacks","logLevel","metrics"}) {
for ( String user : new String[]{"userA","userB","userC","userD"}) {
assertEquals(HttpURLConnection.HTTP_OK,getHttpStatusCode(serverURL + servlet,user));
}
assertEquals(HttpURLConnection.HTTP_FORBIDDEN,getHttpStatusCode(serverURL + servlet,"userE"));
}
myServer.stop();
}
InternalCallVerifier BooleanVerifier
@Test public void testRequestQuoterWithNotNull() throws Exception {
  // When the wrapped request has values for a parameter, the quoter must
  // hand them back unchanged.
  HttpServletRequest mockRequest=Mockito.mock(HttpServletRequest.class);
  String[] expected=new String[]{"abc","def"};
  Mockito.doReturn(expected).when(mockRequest).getParameterValues("dummy");
  RequestQuoter quoter=new RequestQuoter(mockRequest);
  String[] actual=quoter.getParameterValues("dummy");
  Assert.assertTrue("It should return Parameter Values",Arrays.equals(expected,actual));
}
InternalCallVerifier EqualityVerifier
@Test public void testRequestQuoterWithNull() throws Exception {
  // When the wrapped request has no values for a parameter, the quoter
  // must report null rather than an empty array.
  HttpServletRequest mockRequest=Mockito.mock(HttpServletRequest.class);
  Mockito.doReturn(null).when(mockRequest).getParameterValues("dummy");
  RequestQuoter quoter=new RequestQuoter(mockRequest);
  String[] actual=quoter.getParameterValues("dummy");
  Assert.assertEquals("It should return null " + "when there are no values for the parameter",null,actual);
}
InternalCallVerifier BooleanVerifier
@Test public void testHasAdministratorAccess() throws Exception {
// Exercises HttpServer2.hasAdministratorAccess across four scenarios:
// authorization off, authorization on with no remote user, a remote user
// not in the admin ACL, and a remote user allowed by the ACL.
Configuration conf=new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,false);
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(null);
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteUser()).thenReturn(null);
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
// Authorization disabled: everyone has admin access.
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context,request,response));
// Fresh response mock per scenario so sendError verification stays scoped.
response=Mockito.mock(HttpServletResponse.class);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true);
// Authorization enabled but no authenticated user: 403.
Assert.assertFalse(HttpServer2.hasAdministratorAccess(context,request,response));
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN),Mockito.anyString());
response=Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getRemoteUser()).thenReturn("foo");
// Authenticated user with no ACL configured: allowed.
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context,request,response));
response=Mockito.mock(HttpServletResponse.class);
AccessControlList acls=Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// ACL present and user rejected: 403.
Assert.assertFalse(HttpServer2.hasAdministratorAccess(context,request,response));
Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN),Mockito.anyString());
response=Mockito.mock(HttpServletResponse.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// ACL present and user accepted: allowed.
Assert.assertTrue(HttpServer2.hasAdministratorAccess(context,request,response));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testBindAddress() throws Exception {
// Verifies ephemeral-port binding and that a stopped server can reopen
// its listeners on the same port.
checkBindAddress("localhost",0,false).stop();
HttpServer2 myServer=checkBindAddress("localhost",0,false);
HttpServer2 myServer2=null;
try {
// Deliberately bind a second server to the first server's port;
// findPort=true lets it probe upward from an occupied port.
int port=myServer.getConnectorAddress(0).getPort();
myServer2=checkBindAddress("localhost",port,true);
port=myServer2.getConnectorAddress(0).getPort();
// After stop() the connector address is gone...
myServer2.stop();
assertNull(myServer2.getConnectorAddress(0));
// ...but reopening the listeners restores the same port.
myServer2.openListeners();
assertEquals(port,myServer2.getConnectorAddress(0).getPort());
}
finally {
myServer.stop();
if (myServer2 != null) {
myServer2.stop();
}
}
}
InternalCallVerifier BooleanVerifier
@Test public void testRequiresAuthorizationAccess() throws Exception {
// Instrumentation access is open by default, but once both
// INSTRUMENTATION_REQUIRES_ADMIN and SECURITY_AUTHORIZATION are set a
// non-admin user must be rejected.
Configuration conf=new Configuration();
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
// Default configuration: access allowed.
Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context,request,response));
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,true);
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,true);
AccessControlList acls=Mockito.mock(AccessControlList.class);
Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false);
Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
// Admin required and ACL rejects the user: access denied.
Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context,request,response));
}
InternalCallVerifier EqualityVerifier
/**
 * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics
 * servlets, when authentication filters are set, but authorization is not
 * enabled.
 * @throws Exception on any server or HTTP failure
 */
@Test public void testDisabledAuthorizationOfDefaultServlets() throws Exception {
// Authentication filter is installed, but HADOOP_SECURITY_AUTHORIZATION
// stays at its default (off), so every authenticated user gets through.
Configuration conf=new Configuration();
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,DummyFilterInitializer.class.getName());
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MyGroupsProvider.class.getName());
Groups.getUserToGroupsMappingService(conf);
MyGroupsProvider.clearMapping();
MyGroupsProvider.mapping.put("userA",Arrays.asList("groupA"));
MyGroupsProvider.mapping.put("userB",Arrays.asList("groupB"));
HttpServer2 myServer=new HttpServer2.Builder().setName("test").addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE,conf);
myServer.start();
String serverURL="http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
// With authorization disabled, both users reach every default servlet.
for ( String servlet : new String[]{"conf","logs","stacks","logLevel","metrics"}) {
for ( String user : new String[]{"userA","userB"}) {
assertEquals(HttpURLConnection.HTTP_OK,getHttpStatusCode(serverURL + servlet,user));
}
}
myServer.stop();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test that a webapp context attribute survives while the server is alive
 * and is cleared once the server is stopped.
 * @throws Throwable on failure
 */
@Test public void testWepAppContextAfterServerStop() throws Throwable {
HttpServer2 server=null;
String key="test.attribute.key";
String value="test.attribute.value";
server=createTestServer();
// Server must not report alive before start().
assertNotLive(server);
server.start();
server.setAttribute(key,value);
assertAlive(server);
// While running, the attribute set above is readable.
assertEquals(value,server.getAttribute(key));
stop(server);
// Stopping the server tears down the webapp context with its attributes.
assertNull("Server context should have cleared",server.getAttribute(key));
}
UtilityVerifier InternalCallVerifier
/**
 * Test that an invalid webapp triggers an exception
 * @throws Throwable if something went wrong
 */
@Test public void testMissingServerResource() throws Throwable {
try {
// Creating a server for a webapp directory that does not exist must
// fail; reaching toString()/stop() means no exception was thrown.
HttpServer2 server=createServer("NoSuchWebapp");
String serverDescription=server.toString();
stop(server);
fail("Expected an exception, got " + serverDescription);
}
catch ( FileNotFoundException expected) {
// Expected path: the missing webapp surfaces as FileNotFoundException.
log.debug("Expected exception " + expected,expected);
}
}
InternalCallVerifier EqualityVerifier
@Test public void testFilter() throws Exception {
  // StaticUserFilter must wrap the request so the configured static user
  // is reported as both the user principal and the remote user.
  FilterConfig config=mockConfig("myuser");
  StaticUserFilter suf=new StaticUserFilter();
  suf.init(config);
  // Parameterized captor: with the raw ArgumentCaptor, getValue() returned
  // Object and the wrapper accessor calls below did not compile.
  ArgumentCaptor<HttpServletRequestWrapper> wrapperArg=ArgumentCaptor.forClass(HttpServletRequestWrapper.class);
  FilterChain chain=mock(FilterChain.class);
  suf.doFilter(mock(HttpServletRequest.class),mock(ServletResponse.class),chain);
  Mockito.verify(chain).doFilter(wrapperArg.capture(),Mockito.anyObject());
  HttpServletRequestWrapper wrapper=wrapperArg.getValue();
  assertEquals("myuser",wrapper.getUserPrincipal().getName());
  assertEquals("myuser",wrapper.getRemoteUser());
  suf.destroy();
}
InternalCallVerifier EqualityVerifier
@Test public void testConfiguration(){
  // The static user is read from the modern configuration key.
  Configuration configuration=new Configuration();
  configuration.set(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER,"joe");
  assertEquals("joe",StaticUserWebFilter.getUsernameFromConf(configuration));
}
InternalCallVerifier EqualityVerifier
@Test public void testOldStyleConfiguration(){
  // The legacy "dfs.web.ugi" key (user,group,...) is still honored; only
  // the user portion should be returned.
  Configuration configuration=new Configuration();
  configuration.set("dfs.web.ugi","joe,group1,group2");
  assertEquals("joe",StaticUserWebFilter.getUsernameFromConf(configuration));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testOldFormat() throws IOException {
  // Write the int[] field 'i' the old (non-compact) way, then check that it
  // was labelled as an int array and that every element reads back intact.
  ObjectWritable.writeObject(out,i,i.getClass(),null);
  in.reset(out.getData(),out.getLength());
  @SuppressWarnings("deprecation") String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not labelled as an array of int",i.getClass().getName(),className);
  int length=in.readInt();
  assertEquals("The int[] written by ObjectWritable as a non-compact array " + "was not expected length",i.length,length);
  int[] readValue=new int[length];
  try {
    // Loop index renamed: it previously shadowed the int[] field 'i' that
    // the surrounding writes and the final Arrays.equals refer to.
    for (int idx=0; idx < length; idx++) {
      readValue[idx]=(int)((Integer)ObjectWritable.readObject(in,null));
    }
  }
  catch ( Exception e) {
    fail("The int[] written by ObjectWritable as a non-compact array " + "was corrupted. Failed to correctly read int[] of length " + length + ". Got exception:\n"+ StringUtils.stringifyException(e));
  }
  assertTrue("The int[] written by ObjectWritable as a non-compact array " + "was corrupted.",Arrays.equals(i,readValue));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException {
  // An int[] and an explicit ArrayPrimitiveWritable should both be written
  // with the expected class labels, and both should deserialize intact.
  ObjectWritable.writeObject(out,i,i.getClass(),null,true);
  ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i);
  ObjectWritable.writeObject(out,apw,apw.getClass(),null,true);
  in.reset(out.getData(),out.getLength());
  String className=UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className);
  ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal();
  apwi.readFields(in);
  // Check the DESERIALIZED object 'apwi': the original asserted on 'apw',
  // the writer-side instance, which is trivially int.class and proved nothing.
  assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apwi.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get())));
  String declaredClassName=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName);
  className=UTF8.readString(in);
  assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className);
  ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable();
  apw2.readFields(in);
  assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType());
  assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get())));
}
InternalCallVerifier EqualityVerifier
@Test public void testHash() throws Exception {
  // The hash depends only on the live bytes, not on spare capacity.
  BytesWritable writable=new BytesWritable("owen".getBytes());
  assertEquals(4347922,writable.hashCode());
  writable.setCapacity(10000);
  assertEquals(4347922,writable.hashCode());
  // An empty value hashes to 1.
  writable.setSize(0);
  assertEquals(1,writable.hashCode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * This test was written as result of adding the new zero
 * copy constructor and set method to BytesWritable. These
 * methods allow users to specify the backing buffer of the
 * BytesWritable instance and a length.
 */
@Test public void testZeroCopy(){
byte[] bytes="brock".getBytes();
// zeroBuf adopts 'bytes' directly; copyBuf makes its own copy.
BytesWritable zeroBuf=new BytesWritable(bytes,bytes.length);
BytesWritable copyBuf=new BytesWritable(bytes);
// Identity check: the zero-copy constructor must not duplicate the array.
assertTrue("copy took place, backing array != array passed to constructor",bytes == zeroBuf.getBytes());
assertTrue("length of BW should backing byte array",zeroBuf.getLength() == bytes.length);
// Equality contract must hold regardless of how the buffer was acquired.
assertEquals("objects with same backing array should be equal",zeroBuf,copyBuf);
assertEquals("string repr of objects with same backing array should be equal",zeroBuf.toString(),copyBuf.toString());
assertTrue("compare order objects with same backing array should be equal",zeroBuf.compareTo(copyBuf) == 0);
assertTrue("hash of objects with same backing array should be equal",zeroBuf.hashCode() == copyBuf.hashCode());
// Re-point zeroBuf at a larger buffer, then back to the original bytes:
// the (array, offset, len) set must fully replace the previous backing state.
byte[] buffer=new byte[bytes.length * 5];
zeroBuf.set(buffer,0,buffer.length);
zeroBuf.set(bytes,0,bytes.length);
assertEquals("buffer created with (array, len) has bad contents",zeroBuf,copyBuf);
assertTrue("buffer created with (array, len) has bad length",zeroBuf.getLength() == copyBuf.getLength());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testSizeChange() throws Exception {
// Exercises setSize/setCapacity interactions: growth must reallocate and
// preserve contents; shrinking capacity truncates the logical size.
byte[] hadoop="hadoop".getBytes();
BytesWritable buf=new BytesWritable(hadoop);
int size=buf.getLength();
int orig_capacity=buf.getCapacity();
// Doubling the size must grow capacity to at least 2*size.
buf.setSize(size * 2);
int new_capacity=buf.getCapacity();
// Duplicate the original bytes into the newly exposed second half.
System.arraycopy(buf.getBytes(),0,buf.getBytes(),size,size);
assertTrue(new_capacity >= size * 2);
assertEquals(size * 2,buf.getLength());
assertTrue(new_capacity != orig_capacity);
// Growing again must reallocate once more.
buf.setSize(size * 4);
assertTrue(new_capacity != buf.getCapacity());
// The first 2*size bytes must still repeat the original pattern.
for (int i=0; i < size * 2; ++i) {
assertEquals(hadoop[i % size],buf.getBytes()[i]);
}
assertEquals(size * 4,buf.copyBytes().length);
// Shrinking capacity below the size truncates the logical length too.
buf.setCapacity(1);
assertEquals(1,buf.getLength());
assertEquals(hadoop[0],buf.getBytes()[0]);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test {@link ByteWritable}
 * methods compareTo(), toString(), equals()
 */
@Test public void testObjectCommonMethods(){
byte b=0x9;
ByteWritable bw=new ByteWritable();
bw.set(b);
// get() must return exactly what set() stored.
assertTrue("testSetByteWritable error",bw.get() == b);
// compareTo sign must follow the numeric ordering of the wrapped byte.
assertTrue("testSetByteWritable error < 0",bw.compareTo(new ByteWritable((byte)0xA)) < 0);
assertTrue("testSetByteWritable error > 0",bw.compareTo(new ByteWritable((byte)0x8)) > 0);
assertTrue("testSetByteWritable error == 0",bw.compareTo(new ByteWritable((byte)0x9)) == 0);
// equals: same value -> true; different value or different type -> false.
assertTrue("testSetByteWritable equals error !!!",bw.equals(new ByteWritable((byte)0x9)));
assertTrue("testSetByteWritable equals error !!!",!bw.equals(new ByteWritable((byte)0xA)));
assertTrue("testSetByteWritable equals error !!!",!bw.equals(new IntWritable(1)));
// toString renders the decimal value.
assertEquals("testSetByteWritable error ","9",bw.toString());
}
InternalCallVerifier EqualityVerifier
@Test public void testDataOutputByteBufferCompatibility() throws IOException {
// Writes the same seeded-random junk to DataOutputBuffer and
// DataOutputByteBuffer and asserts both produce identical bytes, across
// three rounds of differing sizes with resets in between.
DataOutputBuffer dob=new DataOutputBuffer();
DataOutputByteBuffer dobb=new DataOutputByteBuffer();
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Log the seed so a failure can be reproduced.
System.out.println("SEED: " + seed);
writeJunk(dob,r,seed,1000);
writeJunk(dobb,r,seed,1000);
byte[] check=toBytes(dobb.getData(),dobb.getLength());
assertEquals(check.length,dob.getLength());
assertArrayEquals(check,Arrays.copyOf(dob.getData(),dob.getLength()));
// Second round: larger payload after reset.
dob.reset();
dobb.reset();
writeJunk(dob,r,seed,3000);
writeJunk(dobb,r,seed,3000);
check=toBytes(dobb.getData(),dobb.getLength());
assertEquals(check.length,dob.getLength());
assertArrayEquals(check,Arrays.copyOf(dob.getData(),dob.getLength()));
// Third round: back to the original size, verifying reset shrinks state.
dob.reset();
dobb.reset();
writeJunk(dob,r,seed,1000);
writeJunk(dobb,r,seed,1000);
check=toBytes(dobb.getData(),dobb.getLength());
assertEquals("Failed Checking length = " + check.length,check.length,dob.getLength());
assertArrayEquals(check,Arrays.copyOf(dob.getData(),dob.getLength()));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * test {@code MapFile.Writer} constructor with IOException
 */
@Test public void testPathExplosionWriterCreation(){
  // A filesystem spy whose mkdirs() throws should make the Writer
  // constructor surface that IOException with the same message.
  Path path=new Path(TEST_DIR,"testPathExplosionWriterCreation.mapfile");
  String TEST_ERROR_MESSAGE="Mkdirs failed to create directory " + path.getName();
  MapFile.Writer writer=null;
  try {
    FileSystem fsSpy=spy(FileSystem.get(conf));
    Path pathSpy=spy(path);
    when(fsSpy.mkdirs(path)).thenThrow(new IOException(TEST_ERROR_MESSAGE));
    when(pathSpy.getFileSystem(conf)).thenReturn(fsSpy);
    writer=new MapFile.Writer(conf,pathSpy,MapFile.Writer.keyClass(IntWritable.class),MapFile.Writer.valueClass(IntWritable.class));
    fail("fail in testPathExplosionWriterCreation !!!");
  }
  catch ( IOException ex) {
    // Fixed argument order: assertEquals takes (message, expected, actual);
    // the original swapped expected and actual, producing a misleading
    // failure report.
    assertEquals("testPathExplosionWriterCreation ex message error !!!",TEST_ERROR_MESSAGE,ex.getMessage());
  }
  catch ( Exception e) {
    fail("fail in testPathExplosionWriterCreation. Other ex !!!");
  }
  finally {
    IOUtils.cleanup(null,writer);
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test {@code MapFile.Reader.getClosest()} method
 */
@Test public void testGetClosestOnCurrentApi() throws Exception {
  final String TEST_PREFIX="testGetClosestOnCurrentApi.mapfile";
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    // Keys are the strings "1","11","21",...,"91" in Text (lexicographic) order.
    writer=createWriter(TEST_PREFIX,Text.class,Text.class);
    int FIRST_KEY=1;
    for (int i=FIRST_KEY; i < 100; i+=10) {
      Text t=new Text(Integer.toString(i));
      writer.append(t,t);
    }
    writer.close();
    reader=createReader(TEST_PREFIX,Text.class);
    Text key=new Text("55");
    Text value=new Text();
    // Closest at-or-after "55" is "61"; closest at-or-before is "51".
    Text closest=(Text)reader.getClosest(key,value);
    assertEquals(new Text("61"),closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("51"),closest);
    final Text explicitKey=new Text("21");
    closest=(Text)reader.getClosest(explicitKey,value);
    // Fixed assertion target: the original compared explicitKey to itself
    // (always true) instead of checking the value returned by getClosest.
    assertEquals(new Text("21"),closest);
    // A key before the first entry resolves forward to the first key.
    key=new Text("00");
    closest=(Text)reader.getClosest(key,value);
    assertEquals(FIRST_KEY,Integer.parseInt(closest.toString()));
    // A key after the last entry has no forward match, only a backward one.
    key=new Text("92");
    closest=(Text)reader.getClosest(key,value);
    assertNull("Not null key in testGetClosestWithNewCode",closest);
    closest=(Text)reader.getClosest(key,value,true);
    assertEquals(new Text("91"),closest);
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("deprecation") public void testMidKey() throws Exception {
  // A MapFile holding exactly one entry reports that entry as its mid key.
  Path dirName=new Path(TEST_DIR,"testMidKey.mapfile");
  FileSystem fs=FileSystem.getLocal(conf);
  Path qualified=fs.makeQualified(dirName);
  MapFile.Writer writer=null;
  MapFile.Reader reader=null;
  try {
    writer=new MapFile.Writer(conf,fs,qualified.toString(),IntWritable.class,IntWritable.class);
    writer.append(new IntWritable(1),new IntWritable(1));
    writer.close();
    reader=new MapFile.Reader(qualified,conf);
    assertEquals(new IntWritable(1),reader.midKey());
  }
  finally {
    IOUtils.cleanup(null,writer,reader);
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test getClosest feature.
 * @throws Exception on any MapFile read/write failure
 */
@Test @SuppressWarnings("deprecation") public void testGetClosest() throws Exception {
Path dirName=new Path(TEST_DIR,"testGetClosest.mapfile");
FileSystem fs=FileSystem.getLocal(conf);
Path qualifiedDirName=fs.makeQualified(dirName);
// Small index interval so the index is exercised even with few keys.
MapFile.Writer.setIndexInterval(conf,3);
MapFile.Writer writer=null;
MapFile.Reader reader=null;
try {
writer=new MapFile.Writer(conf,fs,qualifiedDirName.toString(),Text.class,Text.class);
assertEquals(3,writer.getIndexInterval());
final int FIRST_KEY=10;
// Keys are zero-padded to two digits ("10","20",...,"90") so that Text's
// lexicographic order matches numeric order.
for (int i=FIRST_KEY; i < 100; i+=10) {
String iStr=Integer.toString(i);
Text t=new Text("00".substring(iStr.length()) + iStr);
writer.append(t,t);
}
writer.close();
reader=new MapFile.Reader(qualifiedDirName,conf);
Text key=new Text("55");
Text value=new Text();
// Closest at-or-after "55" is "60"; closest at-or-before is "50".
Text closest=(Text)reader.getClosest(key,value);
assertEquals(new Text("60"),closest);
closest=(Text)reader.getClosest(key,value,true);
assertEquals(new Text("50"),closest);
// An exact match is returned in both directions.
final Text TWENTY=new Text("20");
closest=(Text)reader.getClosest(TWENTY,value);
assertEquals(TWENTY,closest);
closest=(Text)reader.getClosest(TWENTY,value,true);
assertEquals(TWENTY,closest);
// Before the first key: forward search finds FIRST_KEY, backward finds nothing.
key=new Text("00");
closest=(Text)reader.getClosest(key,value);
assertEquals(FIRST_KEY,Integer.parseInt(closest.toString()));
closest=(Text)reader.getClosest(key,value,true);
assertNull(closest);
// After the last key: forward search finds nothing, backward finds "90".
key=new Text("99");
closest=(Text)reader.getClosest(key,value);
assertNull(closest);
closest=(Text)reader.getClosest(key,value,true);
assertEquals(new Text("90"),closest);
}
finally {
IOUtils.cleanup(null,writer,reader);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * test all available constructor for {@code MapFile.Writer}
 */
@Test @SuppressWarnings("deprecation") public void testDeprecatedConstructors(){
String path=new Path(TEST_DIR,"writes.mapfile").toString();
MapFile.Writer writer=null;
MapFile.Reader reader=null;
try {
FileSystem fs=FileSystem.getLocal(conf);
// Each deprecated Writer constructor variant must still produce a usable
// writer; each is closed before trying the next.
writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD,defaultProgressable);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,IntWritable.class,Text.class,CompressionType.RECORD,defaultCodec,defaultProgressable);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class,SequenceFile.CompressionType.RECORD);
assertNotNull(writer);
writer.close();
writer=new MapFile.Writer(conf,fs,path,WritableComparator.get(Text.class),Text.class,CompressionType.RECORD,defaultProgressable);
assertNotNull(writer);
writer.close();
// The deprecated Reader constructor must also work and report key/value
// classes from the file just written.
reader=new MapFile.Reader(fs,path,WritableComparator.get(IntWritable.class),conf);
assertNotNull(reader);
assertNotNull("reader key is null !!!",reader.getKeyClass());
assertNotNull("reader value in null",reader.getValueClass());
}
catch ( IOException e) {
fail(e.getMessage());
}
finally {
IOUtils.cleanup(null,writer,reader);
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * test {@code MapFile.Reader.next(key, value)} for iteration.
 */
@Test public void testReaderKeyIteration(){
final String TEST_METHOD_KEY="testReaderKeyIteration.mapfile";
int SIZE=10;
int ITERATIONS=5;
MapFile.Writer writer=null;
MapFile.Reader reader=null;
try {
writer=createWriter(TEST_METHOD_KEY,IntWritable.class,Text.class);
int start=0;
for (int i=0; i < SIZE; i++) writer.append(new IntWritable(i),new Text("Value:" + i));
writer.close();
reader=createReader(TEST_METHOD_KEY,IntWritable.class);
Writable startValue=new Text("Value:" + start);
int i=0;
// Iterate the whole file several times; reset() must rewind the reader
// so each pass sees every entry again.
while (i++ < ITERATIONS) {
IntWritable key=new IntWritable(start);
Writable value=startValue;
while (reader.next(key,value)) {
assertNotNull(key);
assertNotNull(value);
}
reader.reset();
}
// seek() succeeds for a key present in the file and fails past the end.
assertTrue("reader seek error !!!",reader.seek(new IntWritable(SIZE / 2)));
assertFalse("reader seek error !!!",reader.seek(new IntWritable(SIZE * 2)));
}
catch ( IOException ex) {
fail("reader seek error !!!");
}
finally {
IOUtils.cleanup(null,writer,reader);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("deprecation") public void testMidKeyEmpty() throws Exception {
  // midKey() of an empty MapFile is null.
  Path dirName=new Path(TEST_DIR,"testMidKeyEmpty.mapfile");
  FileSystem fs=FileSystem.getLocal(conf);
  Path qualified=fs.makeQualified(dirName);
  MapFile.Writer writer=new MapFile.Writer(conf,fs,qualified.toString(),IntWritable.class,IntWritable.class);
  writer.close();
  MapFile.Reader reader=new MapFile.Reader(qualified,conf);
  try {
    assertEquals(null,reader.midKey());
  }
  finally {
    reader.close();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=1000) public void testPutAll(){
  // putAll must copy both the entries and the class-registration maps.
  SortedMapWritable source=new SortedMapWritable();
  source.put(new Text("key"),new Text("value"));
  SortedMapWritable target=new SortedMapWritable();
  target.putAll(source);
  assertEquals("map1 entries don't match map2 entries",source,target);
  boolean classInfoCopied=target.classToIdMap.containsKey(Text.class) && target.idToClassMap.containsValue(Text.class);
  assertTrue("map2 doesn't have class information from map1",classInfoCopied);
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises SortedMapWritable: ordering of keys, the copy constructor, and
 * nesting SortedMapWritables as values inside another SortedMapWritable.
 */
@Test @SuppressWarnings("unchecked") public void testSortedMapWritable(){
Text[] keys={new Text("key1"),new Text("key2"),new Text("key3")};
BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes()),new BytesWritable("value3".getBytes())};
SortedMapWritable inMap=new SortedMapWritable();
for (int i=0; i < keys.length; i++) {
inMap.put(keys[i],values[i]);
}
// Sorted semantics: first/last key follow the keys' natural order.
assertEquals(0,inMap.firstKey().compareTo(keys[0]));
assertEquals(0,inMap.lastKey().compareTo(keys[2]));
// Copy constructor must reproduce every entry.
SortedMapWritable outMap=new SortedMapWritable(inMap);
assertEquals(inMap.size(),outMap.size());
for ( Map.Entry e : inMap.entrySet()) {
assertTrue(outMap.containsKey(e.getKey()));
assertEquals(0,((WritableComparable)outMap.get(e.getKey())).compareTo(e.getValue()));
}
// Nesting: maps stored as values must also survive a copy.
Text[] maps={new Text("map1"),new Text("map2")};
SortedMapWritable mapOfMaps=new SortedMapWritable();
mapOfMaps.put(maps[0],inMap);
mapOfMaps.put(maps[1],outMap);
SortedMapWritable copyOfMapOfMaps=new SortedMapWritable(mapOfMaps);
for (int i=0; i < maps.length; i++) {
assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
SortedMapWritable a=(SortedMapWritable)mapOfMaps.get(maps[i]);
SortedMapWritable b=(SortedMapWritable)copyOfMapOfMaps.get(maps[i]);
assertEquals(a.size(),b.size());
for ( Writable key : a.keySet()) {
assertTrue(b.containsKey(key));
WritableComparable aValue=(WritableComparable)a.get(key);
WritableComparable bValue=(WritableComparable)b.get(key);
assertEquals(0,aValue.compareTo(bValue));
}
}
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests if equal and hashCode method still hold the contract.
 */
@Test public void testEqualsAndHashCode(){
String failureReason;
SortedMapWritable mapA=new SortedMapWritable();
SortedMapWritable mapB=new SortedMapWritable();
failureReason="SortedMapWritable couldn't be initialized. Got null reference";
assertNotNull(failureReason,mapA);
assertNotNull(failureReason,mapB);
// Contract basics: not equal to null; two empty maps are equal.
assertFalse("equals method returns true when passed null",mapA.equals(null));
assertTrue("Two empty SortedMapWritables are no longer equal",mapA.equals(mapB));
Text[] keys={new Text("key1"),new Text("key2")};
BytesWritable[] values={new BytesWritable("value1".getBytes()),new BytesWritable("value2".getBytes())};
// Disjoint single entries: maps must differ in both equals and hashCode.
mapA.put(keys[0],values[0]);
mapB.put(keys[1],values[1]);
failureReason="Two SortedMapWritables with different data are now equal";
assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason,!mapA.equals(mapB));
assertTrue(failureReason,!mapB.equals(mapA));
// Same entry sets built in opposite insertion order: must be equal and
// hash the same (insertion order is irrelevant to a sorted map).
mapA.put(keys[1],values[1]);
mapB.put(keys[0],values[0]);
failureReason="Two SortedMapWritables with same entry sets formed in different order are now different";
assertEquals(failureReason,mapA.hashCode(),mapB.hashCode());
assertTrue(failureReason,mapA.equals(mapB));
assertTrue(failureReason,mapB.equals(mapA));
// Swap the values under mapA's keys: content differs again.
mapA.put(keys[0],values[1]);
mapA.put(keys[1],values[0]);
failureReason="Two SortedMapWritables with different content are now equal";
assertTrue(failureReason,mapA.hashCode() != mapB.hashCode());
assertTrue(failureReason,!mapA.equals(mapB));
assertTrue(failureReason,!mapB.equals(mapA));
}
InternalCallVerifier EqualityVerifier
/**
 * Test that number of "unknown" classes is propagated across multiple copies.
 */
@Test @SuppressWarnings("deprecation") public void testForeignClass(){
  // UTF8 is not one of SortedMapWritable's pre-registered value classes,
  // so the count of newly-seen classes must survive two copy constructions.
  SortedMapWritable original=new SortedMapWritable();
  original.put(new Text("key"),new UTF8("value"));
  original.put(new Text("key2"),new UTF8("value2"));
  SortedMapWritable firstCopy=new SortedMapWritable(original);
  SortedMapWritable secondCopy=new SortedMapWritable(firstCopy);
  assertEquals(1,secondCopy.getNewClasses());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testGzipCodecRead() throws IOException {
// Force the pure-Java zlib path, write a .gz file with the JDK's
// GZIPOutputStream, and verify the Hadoop codec reads it back.
Configuration conf=new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,false);
assertFalse("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
Decompressor zlibDecompressor=ZlibFactory.getZlibDecompressor(conf);
assertNotNull("zlibDecompressor is null!",zlibDecompressor);
assertTrue("ZlibFactory returned unexpected inflator",zlibDecompressor instanceof BuiltInZlibInflater);
CodecPool.returnDecompressor(zlibDecompressor);
// Write the fixture file with the JDK gzip implementation.
String tmpDir=System.getProperty("test.build.data","/tmp/");
Path f=new Path(new Path(tmpDir),"testGzipCodecRead.txt.gz");
BufferedWriter bw=new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
final String msg="This is the message in the file!";
bw.write(msg);
bw.close();
// The factory must pick the gzip codec from the .gz extension.
CompressionCodecFactory ccf=new CompressionCodecFactory(conf);
CompressionCodec codec=ccf.getCodec(f);
Decompressor decompressor=CodecPool.getDecompressor(codec);
FileSystem fs=FileSystem.getLocal(conf);
InputStream is=fs.open(f);
is=codec.createInputStream(is,decompressor);
BufferedReader br=new BufferedReader(new InputStreamReader(is));
String line=br.readLine();
assertEquals("Didn't get the same message back!",msg,line);
br.close();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Compresses random bytes with the JDK's GZIPOutputStream and checks
 * that GzipCodec, forced onto the built-in (non-native) decompressor,
 * reproduces them exactly. The RNG seed is logged so failures can be
 * replayed.
 */
@Test public void testGzipCompatibility() throws IOException {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
LOG.info("seed: " + seed);
DataOutputBuffer dflbuf=new DataOutputBuffer();
GZIPOutputStream gzout=new GZIPOutputStream(dflbuf);
// Random payload of random size up to 128KB.
byte[] b=new byte[r.nextInt(128 * 1024 + 1)];
r.nextBytes(b);
gzout.write(b);
gzout.close();
DataInputBuffer gzbuf=new DataInputBuffer();
gzbuf.reset(dflbuf.getData(),dflbuf.getLength());
Configuration conf=new Configuration();
// Force the pure-Java gzip path.
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,false);
CompressionCodec codec=ReflectionUtils.newInstance(GzipCodec.class,conf);
Decompressor decom=codec.createDecompressor();
assertNotNull(decom);
assertEquals(BuiltInGzipDecompressor.class,decom.getClass());
InputStream gzin=codec.createInputStream(gzbuf,decom);
// Reuse dflbuf as the sink for the decompressed bytes.
dflbuf.reset();
IOUtils.copyBytes(gzin,dflbuf,4096);
final byte[] dflchk=Arrays.copyOf(dflbuf.getData(),dflbuf.getLength());
assertArrayEquals(b,dflchk);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that CodecPool does not hand a DefaultCodec's compressor
 * back out when a GzipCodec compressor is requested afterwards.
 * Skipped when the native zlib library is unavailable.
 */
@Test public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf=new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzipCodec=ReflectionUtils.newInstance(GzipCodec.class,conf);
  DefaultCodec defaultCodec=ReflectionUtils.newInstance(DefaultCodec.class,conf);
  // Check one compressor of each flavor out of the pool, then return both.
  Compressor fromGzip=CodecPool.getCompressor(gzipCodec);
  Compressor fromDefault=CodecPool.getCompressor(defaultCodec);
  CodecPool.returnCompressor(fromGzip);
  CodecPool.returnCompressor(fromDefault);
  // A fresh gzip request must not be satisfied by the DefaultCodec's
  // returned compressor.
  Compressor reused=CodecPool.getCompressor(gzipCodec);
  assertTrue("Got mismatched ZlibCompressor",fromDefault != reused);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Regression test for decompressing a gzip stream whose uncompressed
 * size overflows 32 bits: writes (4K+1) one-megabyte buffers of NUL
 * characters through GZIPOutputStream and reads them back through the
 * Hadoop codec path with the pure-Java inflater.
 *
 * Fixes versus the previous version: assertEquals arguments were in
 * (actual, expected) order, producing misleading failure messages, and
 * the single Reader.read(char[]) call could legally return a short
 * count and fail the test spuriously — reads are now looped to fill.
 */
@Test public void testGzipLongOverflow() throws IOException {
  LOG.info("testGzipLongOverflow");
  Configuration conf=new Configuration();
  // Force the built-in (pure Java) zlib implementation.
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,false);
  assertFalse("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor=ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!",zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Write NBUF buffers of 1MB of '\0' through the JDK's gzip stream.
  String tmpDir=System.getProperty("test.build.data","/tmp/");
  Path f=new Path(new Path(tmpDir),"testGzipLongOverflow.bin.gz");
  BufferedWriter bw=new BufferedWriter(new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final int NBUF=1024 * 4 + 1;
  final char[] buf=new char[1024 * 1024];
  for (int i=0; i < buf.length; i++) buf[i]='\0';
  for (int i=0; i < NBUF; i++) {
    bw.write(buf);
  }
  bw.close();
  CompressionCodecFactory ccf=new CompressionCodecFactory(conf);
  CompressionCodec codec=ccf.getCodec(f);
  Decompressor decompressor=CodecPool.getDecompressor(codec);
  FileSystem fs=FileSystem.getLocal(conf);
  InputStream is=fs.open(f);
  is=codec.createInputStream(is,decompressor);
  BufferedReader br=new BufferedReader(new InputStreamReader(is));
  for (int j=0; j < NBUF; j++) {
    // Reader.read may fill less than the whole buffer; loop until full.
    int off=0;
    while (off < buf.length) {
      int n=br.read(buf,off,buf.length - off);
      assertTrue("got premature EOF!",n > 0);
      off+=n;
    }
    for (int i=0; i < buf.length; i++) assertEquals("got wrong byte!",'\0',buf[i]);
  }
  br.close();
}
InternalCallVerifier BooleanVerifier
/**
* In {@link CompressorStream#close()}, if {@link CompressorStream#finish()} throws an IOEXception, outputStream
* object was not getting closed.
*/
@Test public void testClose(){
TestCompressorStream testCompressorStream=new TestCompressorStream();
try {
testCompressorStream.close();
}
catch ( IOException e) {
System.out.println("Expected IOException");
}
Assert.assertTrue("closed shoud be true",((CompressorStream)testCompressorStream).closed);
file.delete();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips 54KB of generated data through Lz4Compressor /
 * Lz4Decompressor, checking the byte counters, the finished flag and
 * the recovered content.
 */
@Test public void testCompressDecompress(){
int BYTE_SIZE=1024 * 54;
byte[] bytes=generate(BYTE_SIZE);
Lz4Compressor compressor=new Lz4Compressor();
try {
compressor.setInput(bytes,0,bytes.length);
// setInput should account for bytes read, but nothing is written
// until compress() runs.
assertTrue("Lz4CompressDecompress getBytesRead error !!!",compressor.getBytesRead() > 0);
assertTrue("Lz4CompressDecompress getBytesWritten before compress error !!!",compressor.getBytesWritten() == 0);
byte[] compressed=new byte[BYTE_SIZE];
int cSize=compressor.compress(compressed,0,compressed.length);
assertTrue("Lz4CompressDecompress getBytesWritten after compress error !!!",compressor.getBytesWritten() > 0);
Lz4Decompressor decompressor=new Lz4Decompressor();
decompressor.setInput(compressed,0,cSize);
byte[] decompressed=new byte[BYTE_SIZE];
decompressor.decompress(decompressed,0,decompressed.length);
assertTrue("testLz4CompressDecompress finished error !!!",decompressor.finished());
assertArrayEquals(bytes,decompressed);
// After reset() no input should remain pending in the decompressor.
compressor.reset();
decompressor.reset();
assertTrue("decompressor getRemaining error !!!",decompressor.getRemaining() == 0);
}
catch ( Exception e) {
fail("testLz4CompressDecompress ex error!!!");
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier HybridVerifier
/**
 * Feeds the Lz4Compressor an input one byte larger than 64KB (its
 * default internal buffer size) and checks that compression still
 * produces non-empty output.
 */
@Test public void testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize(){
  final int inputSize=1024 * 64 + 1;
  try {
    Lz4Compressor lz4=new Lz4Compressor();
    byte[] input=generate(inputSize);
    // A fresh compressor must be asking for input.
    assertTrue("needsInput error !!!",lz4.needsInput());
    lz4.setInput(input,0,input.length);
    byte[] out=new byte[inputSize];
    int produced=lz4.compress(out,0,input.length);
    assertTrue("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize error !!!",produced != 0);
  }
  catch ( Exception ex) {
    fail("testSetInputWithBytesSizeMoreThenDefaultLz4CompressorByfferSize ex error !!!");
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips 54KB of generated data through SnappyCompressor /
 * SnappyDecompressor, checking the byte counters, the finished flag
 * and the recovered content.
 */
@Test public void testSnappyCompressDecompress(){
int BYTE_SIZE=1024 * 54;
byte[] bytes=BytesGenerator.get(BYTE_SIZE);
SnappyCompressor compressor=new SnappyCompressor();
try {
compressor.setInput(bytes,0,bytes.length);
// setInput should account for bytes read, but nothing is written
// until compress() runs.
assertTrue("SnappyCompressDecompress getBytesRead error !!!",compressor.getBytesRead() > 0);
assertTrue("SnappyCompressDecompress getBytesWritten before compress error !!!",compressor.getBytesWritten() == 0);
byte[] compressed=new byte[BYTE_SIZE];
int cSize=compressor.compress(compressed,0,compressed.length);
assertTrue("SnappyCompressDecompress getBytesWritten after compress error !!!",compressor.getBytesWritten() > 0);
SnappyDecompressor decompressor=new SnappyDecompressor(BYTE_SIZE);
decompressor.setInput(compressed,0,cSize);
byte[] decompressed=new byte[BYTE_SIZE];
decompressor.decompress(decompressed,0,decompressed.length);
assertTrue("testSnappyCompressDecompress finished error !!!",decompressor.finished());
Assert.assertArrayEquals(bytes,decompressed);
// After reset() no input should remain pending in the decompressor.
compressor.reset();
decompressor.reset();
assertTrue("decompressor getRemaining error !!!",decompressor.getRemaining() == 0);
}
catch ( Exception e) {
fail("testSnappyCompressDecompress ex error!!!");
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips 100KB of random bytes through BlockCompressorStream /
 * BlockDecompressorStream backed by the Snappy compressor pair and
 * checks the decompressed output matches the input.
 *
 * Fix versus the previous version: the single read(byte[]) could
 * legally return before filling the result array, making the array
 * comparison fail spuriously; readFully() guarantees the whole
 * payload is consumed first.
 */
@Test public void testSnappyCompressorDecopressorLogicWithCompressionStreams(){
  int BYTE_SIZE=1024 * 100;
  byte[] bytes=BytesGenerator.get(BYTE_SIZE);
  int bufferSize=262144;
  // Worst-case expansion allowance for an incompressible block.
  int compressionOverhead=(bufferSize / 6) + 32;
  DataOutputStream deflateOut=null;
  DataInputStream inflateIn=null;
  try {
    DataOutputBuffer compressedDataBuffer=new DataOutputBuffer();
    CompressionOutputStream deflateFilter=new BlockCompressorStream(compressedDataBuffer,new SnappyCompressor(bufferSize),bufferSize,compressionOverhead);
    deflateOut=new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes,0,bytes.length);
    deflateOut.flush();
    deflateFilter.finish();
    // Feed the compressed bytes back through the decompressor stream.
    DataInputBuffer deCompressedDataBuffer=new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(),0,compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter=new BlockDecompressorStream(deCompressedDataBuffer,new SnappyDecompressor(bufferSize),bufferSize);
    inflateIn=new DataInputStream(new BufferedInputStream(inflateFilter));
    byte[] result=new byte[BYTE_SIZE];
    // readFully loops internally until the array is filled or EOF.
    inflateIn.readFully(result);
    Assert.assertArrayEquals("original array not equals compress/decompressed array",result,bytes);
  }
  catch ( IOException e) {
    fail("testSnappyCompressorDecopressorLogicWithCompressionStreams ex error !!!");
  }
  finally {
    try {
      if (deflateOut != null) deflateOut.close();
      if (inflateIn != null) inflateIn.close();
    }
    catch ( Exception e) {
      // best-effort cleanup; close failures are irrelevant to the test
    }
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Exercises BuiltInGzipDecompressor's argument checks and its
 * rejection of malformed gzip headers: null input, negative length,
 * bad magic bytes, an unsupported compression-method byte, an invalid
 * flags byte, and the extra-field (FEXTRA) parsing path. Each bad
 * stream is expected to surface an IOException (or the specific
 * runtime exception for bad arguments); any other exception fails.
 */
@Test public void testBuiltInGzipDecompressorExceptions(){
BuiltInGzipDecompressor decompresser=new BuiltInGzipDecompressor();
// Null input buffer must raise NullPointerException.
try {
decompresser.setInput(null,0,1);
}
catch ( NullPointerException ex) {
}
catch ( Exception ex) {
fail("testBuiltInGzipDecompressorExceptions npe error " + ex);
}
// Negative length must raise ArrayIndexOutOfBoundsException.
try {
decompresser.setInput(new byte[]{0},0,-1);
}
catch ( ArrayIndexOutOfBoundsException ex) {
}
catch ( Exception ex) {
fail("testBuiltInGzipDecompressorExceptions aioob error" + ex);
}
// Neither failed call should have advanced any counters.
assertTrue("decompresser.getBytesRead error",decompresser.getBytesRead() == 0);
assertTrue("decompresser.getRemaining error",decompresser.getRemaining() == 0);
decompresser.reset();
decompresser.end();
InputStream decompStream=null;
// Stream whose first two bytes are not the gzip magic (0x1f 0x8b).
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{0,0,1,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
}
catch ( Exception ex) {
fail("invalid 0 and 1 byte in gzip stream" + ex);
}
// Correct magic (31,-117) but invalid compression-method byte (7).
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{31,-117,7,1,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
}
catch ( Exception ex) {
fail("invalid 2 byte in gzip stream" + ex);
}
// Valid magic and method (8=deflate) but an invalid flags byte (-32).
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{31,-117,8,-32,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
}
catch ( Exception ex) {
fail("invalid 3 byte in gzip stream" + ex);
}
// Flags byte 4 sets FEXTRA, driving the extra-field parsing path
// over otherwise-bogus data.
try {
int buffSize=1 * 1024;
byte buffer[]=new byte[buffSize];
Decompressor decompressor=new BuiltInGzipDecompressor();
DataInputBuffer gzbuf=new DataInputBuffer();
decompStream=new DecompressorStream(gzbuf,decompressor);
gzbuf.reset(new byte[]{31,-117,8,4,1,1,1,11,1,1,1,1},11);
decompStream.read(buffer);
}
catch ( IOException ioex) {
}
catch ( Exception ex) {
fail("invalid 3 byte make hasExtraField" + ex);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trips 64KB of generated data through ZlibCompressor /
 * ZlibDecompressor, checking the read counter before and after
 * compress(), that the output actually shrank, and that the data
 * survives intact.
 */
@Test public void testZlibCompressDecompress(){
byte[] rawData=null;
int rawDataSize=0;
rawDataSize=1024 * 64;
rawData=generate(rawDataSize);
try {
ZlibCompressor compressor=new ZlibCompressor();
ZlibDecompressor decompressor=new ZlibDecompressor();
assertFalse("testZlibCompressDecompress finished error",compressor.finished());
compressor.setInput(rawData,0,rawData.length);
// Per this assertion, zlib defers consuming input until compress().
assertTrue("testZlibCompressDecompress getBytesRead before error",compressor.getBytesRead() == 0);
compressor.finish();
byte[] compressedResult=new byte[rawDataSize];
int cSize=compressor.compress(compressedResult,0,rawDataSize);
assertTrue("testZlibCompressDecompress getBytesRead ather error",compressor.getBytesRead() == rawDataSize);
// Assumes generate() yields compressible data — TODO confirm.
assertTrue("testZlibCompressDecompress compressed size no less then original size",cSize < rawDataSize);
decompressor.setInput(compressedResult,0,cSize);
byte[] decompressedBytes=new byte[rawDataSize];
decompressor.decompress(decompressedBytes,0,decompressedBytes.length);
assertArrayEquals("testZlibCompressDecompress arrays not equals ",rawData,decompressedBytes);
compressor.reset();
decompressor.reset();
}
catch ( IOException ex) {
fail("testZlibCompressDecompress ex !!!" + ex);
}
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Runs several zlib compress/decompress round trips with native zlib
 * requested, then reinitializes the compressor from the Configuration.
 * If native zlib is unavailable, the else-branch assertion re-checks
 * the same (false) condition and therefore fails the test outright.
 */
@Test public void testZlibCompressorDecompressorWithConfiguration(){
Configuration conf=new Configuration();
conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,true);
if (ZlibFactory.isNativeZlibLoaded(conf)) {
byte[] rawData;
int tryNumber=5;
int BYTE_SIZE=10 * 1024;
Compressor zlibCompressor=ZlibFactory.getZlibCompressor(conf);
Decompressor zlibDecompressor=ZlibFactory.getZlibDecompressor(conf);
rawData=generate(BYTE_SIZE);
try {
// The same compressor/decompressor pair is reused across iterations
// to exercise state reuse between round trips.
for (int i=0; i < tryNumber; i++) compressDecompressZlib(rawData,(ZlibCompressor)zlibCompressor,(ZlibDecompressor)zlibDecompressor);
zlibCompressor.reinit(conf);
}
catch ( Exception ex) {
fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
}
}
else {
// Condition is known false here; this is a deliberate hard failure.
assertTrue("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
}
}
BranchVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises setDictionary argument checking (null dictionary and
 * out-of-bounds offsets) on both the native zlib compressor and
 * decompressor; fails outright when native zlib is unavailable.
 */
@Test public void testZlibCompressorDecompressorSetDictionary(){
  Configuration conf=new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    // Condition is known false here; deliberate hard failure.
    assertTrue("ZlibFactory is using native libs against request",ZlibFactory.isNativeZlibLoaded(conf));
    return;
  }
  Compressor compressor=ZlibFactory.getZlibCompressor(conf);
  Decompressor decompressor=ZlibFactory.getZlibDecompressor(conf);
  checkSetDictionaryNullPointerException(compressor);
  checkSetDictionaryNullPointerException(decompressor);
  checkSetDictionaryArrayIndexOutOfBoundsException(decompressor);
  checkSetDictionaryArrayIndexOutOfBoundsException(compressor);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Seeks a TFile scanner to keys in the first and second blocks, and
 * verifies that locating a key beyond all records lands on the
 * scanner's end location.
 */
@Test public void testLocate() throws IOException {
if (skip) return;
// Three blocks' worth of records so seeks cross block boundaries.
writeRecords(3 * records1stBlock);
Reader reader=new Reader(fs.open(path),fs.getFileStatus(path).getLen(),conf);
Scanner scanner=reader.createScanner();
locate(scanner,composeSortedKey(KEY,2).getBytes());
locate(scanner,composeSortedKey(KEY,records1stBlock - 1).getBytes());
locate(scanner,composeSortedKey(KEY,records1stBlock).getBytes());
// "keyX" presumably sorts after every generated key — hence it must
// resolve to the end location. TODO confirm against composeSortedKey.
Location locX=locate(scanner,"keyX".getBytes());
Assert.assertEquals(scanner.endLocation,locX);
scanner.close();
reader.close();
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
/**
 * Writes one meta block ("testX"), then checks that reading it back
 * succeeds while requesting an absent meta block ("testY") throws.
 */
@Test public void testFailureGetNonExistentMetaBlock() throws IOException {
if (skip) return;
writer.append("keyX".getBytes(),"valueX".getBytes());
// Create a gzip-compressed meta block with some content.
DataOutputStream outMeta=writer.prepareMetaBlock("testX",Compression.Algorithm.GZ.getName());
outMeta.write(123);
outMeta.write("foo".getBytes());
outMeta.close();
closeOutput();
Reader reader=new Reader(fs.open(path),fs.getFileStatus(path).getLen(),conf);
DataInputStream mb=reader.getMetaBlock("testX");
Assert.assertNotNull(mb);
mb.close();
try {
DataInputStream mbBad=reader.getMetaBlock("testY");
Assert.fail("Error on handling non-existent metablocks.");
}
catch ( Exception e) {
// expected: a missing meta block must be reported via an exception
}
reader.close();
}
InternalCallVerifier BooleanVerifier
/**
 * A TFile closed with no key/value pairs must still be readable: it
 * reports itself sorted and a fresh scanner starts at the end.
 */
@Test public void testNoDataEntry() throws IOException {
  if (skip) return;
  closeOutput();
  Reader emptyReader=new Reader(fs.open(path),fs.getFileStatus(path).getLen(),conf);
  Assert.assertTrue(emptyReader.isSorted());
  Scanner emptyScanner=emptyReader.createScanner();
  Assert.assertTrue(emptyScanner.atEnd());
  emptyScanner.close();
  emptyReader.close();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A TFile scanner entry hands out its value once; a second getValue()
 * on the same entry must fail.
 */
@Test public void testFailureReadValueManyTimes() throws IOException {
if (skip) return;
writeRecords(5);
Reader reader=new Reader(fs.open(path),fs.getFileStatus(path).getLen(),conf);
Scanner scanner=reader.createScanner();
byte[] vbuf=new byte[BUF_SIZE];
int vlen=scanner.entry().getValueLength();
scanner.entry().getValue(vbuf);
// First record's value should be VALUE + "0".
Assert.assertEquals(new String(vbuf,0,vlen),VALUE + 0);
try {
scanner.entry().getValue(vbuf);
Assert.fail("Cannot get the value mlutiple times.");
}
catch ( Exception e) {
// expected: the value has already been consumed for this entry
}
scanner.close();
reader.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 *
 * Ten threads hammer NativeIO.POSIX.getFstat on a shared descriptor
 * for ~5 seconds; any assertion failure observed by a worker is
 * captured and rethrown after all threads join.
 *
 * Fix versus the previous version: the raw AtomicReference and List
 * types are generified so the compiler checks element types.
 */
@Test(timeout=30000) public void testMultiThreadedFstat() throws Exception {
  if (Path.WINDOWS) {
    return; // fstat is POSIX-only
  }
  final FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
  // Holds the first failure observed by any worker thread.
  final AtomicReference<Throwable> thrown=new AtomicReference<Throwable>();
  List<Thread> statters=new ArrayList<Thread>();
  for (int i=0; i < 10; i++) {
    Thread statter=new Thread(){
      @Override public void run(){
        long et=Time.now() + 5000;
        // Call getFstat repeatedly for ~5 seconds to surface races.
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"),stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
          }
          catch ( Throwable t) {
            thrown.set(t);
          }
        }
      }
    };
    statters.add(statter);
    statter.start();
  }
  for ( Thread t : statters) {
    t.join();
  }
  fos.close();
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier
/**
 * Covers NativeIO.renameTo error and success paths: a missing source
 * must raise ENOENT (or the Windows message equivalent), renaming a
 * file onto itself and onto a fresh target must succeed, and renaming
 * under a path whose parent is a regular file must raise ENOTDIR (or
 * the Windows message equivalent).
 */
@Test(timeout=30000) public void testRenameTo() throws Exception {
final File TEST_DIR=new File(new File(System.getProperty("test.build.data","build/test/data")),"renameTest");
assumeTrue(TEST_DIR.mkdirs());
File nonExistentFile=new File(TEST_DIR,"nonexistent");
File targetFile=new File(TEST_DIR,"target");
// Renaming a file that does not exist must fail with ENOENT.
try {
NativeIO.renameTo(nonExistentFile,targetFile);
Assert.fail();
}
catch ( NativeIOException e) {
if (Path.WINDOWS) {
Assert.assertEquals(String.format("The system cannot find the file specified.%n"),e.getMessage());
}
else {
Assert.assertEquals(Errno.ENOENT,e.getErrno());
}
}
// Self-rename and rename to a fresh target must both succeed.
File sourceFile=new File(TEST_DIR,"source");
Assert.assertTrue(sourceFile.createNewFile());
NativeIO.renameTo(sourceFile,sourceFile);
NativeIO.renameTo(sourceFile,targetFile);
sourceFile=new File(TEST_DIR,"source");
Assert.assertTrue(sourceFile.createNewFile());
// targetFile is a plain file, so it cannot serve as a parent directory.
File badTarget=new File(targetFile,"subdir");
try {
NativeIO.renameTo(sourceFile,badTarget);
Assert.fail();
}
catch ( NativeIOException e) {
if (Path.WINDOWS) {
Assert.assertEquals(String.format("The parameter is incorrect.%n"),e.getMessage());
}
else {
Assert.assertEquals(Errno.ENOTDIR,e.getErrno());
}
}
FileUtils.deleteQuietly(TEST_DIR);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Sanity-checks NativeIO.POSIX.getFstat on a freshly created file:
 * owner matches the current user (with an Administrators special case
 * on Windows), a group is present, and the mode marks a regular file.
 */
@Test(timeout=30000) public void testFstat() throws Exception {
FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
fos.close();
LOG.info("Stat: " + String.valueOf(stat));
String owner=stat.getOwner();
String expectedOwner=System.getProperty("user.name");
if (Path.WINDOWS) {
// On Windows, files created by a member of Administrators may be
// owned by the Administrators group rather than the user.
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(expectedOwner);
final String adminsGroupString="Administrators";
if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
expectedOwner=adminsGroupString;
}
}
assertEquals(expectedOwner,owner);
assertNotNull(stat.getGroup());
assertTrue(!stat.getGroup().isEmpty());
assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a single byte through a shared-memory descriptor: writes
 * via a FileOutputStream wrapping the descriptor's FD, rewinds the
 * channel, and reads the byte back through the original input stream.
 */
@Test(timeout=10000) public void testReadAndWrite() throws Exception {
  File dir=new File(TEST_BASE,"testReadAndWrite");
  dir.mkdirs();
  SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("woot_",new String[]{dir.getAbsolutePath()});
  FileInputStream in=factory.createDescriptor("testReadAndWrite",4096);
  // Both streams share one file descriptor, so the write below is
  // visible to the reader once the channel is rewound.
  FileOutputStream out=new FileOutputStream(in.getFD());
  out.write(101);
  in.getChannel().position(0);
  Assert.assertEquals(101,in.read());
  in.close();
  out.close();
  FileUtil.fullyDelete(dir);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * failoverOnNetworkException must NOT fail over on a plain
 * UnreliableException (the second call still fails against impl1),
 * but must fail over when the first proxy throws StandbyException
 * (the second call reaches impl2).
 */
@Test public void testFailoverOnStandbyException() throws UnreliableException, IOException, StandbyException {
UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),RetryPolicies.failoverOnNetworkException(1));
assertEquals("impl1",unreliable.succeedsOnceThenFailsReturningString());
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded twice");
}
catch ( UnreliableException e) {
// No failover: the exception still identifies impl1.
assertEquals("impl1",e.getMessage());
}
// With StandbyException as the failure type, failover is expected.
unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(TypeOfExceptionToFailWith.STANDBY_EXCEPTION,TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),RetryPolicies.failoverOnNetworkException(1));
assertEquals("impl1",unreliable.succeedsOnceThenFailsReturningString());
assertEquals("impl2",unreliable.succeedsOnceThenFailsReturningString());
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * The proxy should stay on impl1 for the ten successful calls and
 * fail over to impl2 only after the first thrown exception.
 */
@Test public void testSucceedsTenTimesThenFailOver() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),new FailOverOnceOnAnyExceptionPolicy());
  int successfulCalls=0;
  while (successfulCalls < 10) {
    assertEquals("impl1",proxy.succeedsTenTimesThenFailsReturningString());
    successfulCalls++;
  }
  // The eleventh call fails on impl1 and is retried against impl2.
  assertEquals("impl2",proxy.succeedsTenTimesThenFailsReturningString());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With a fail-over-once policy the proxy serves one call from impl1,
 * one from impl2 after the first failure, and then propagates the
 * exception because no further failover is allowed.
 */
@Test public void testSuccedsOnceThenFailOver() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),new FailOverOnceOnAnyExceptionPolicy());
  assertEquals("impl1",proxy.succeedsOnceThenFailsReturningString());
  assertEquals("impl2",proxy.succeedsOnceThenFailsReturningString());
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded more than twice");
  }
  catch ( UnreliableException expected) {
    // single failover already used; failure is terminal
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With IOException as the failure type, a non-idempotent method must
 * not be retried on another proxy (the IOException surfaces from
 * impl1), while the idempotent variant is allowed to fail over and
 * succeed against impl2.
 */
@Test public void testFailoverOnNetworkExceptionIdempotentOperation() throws UnreliableException, IOException, StandbyException {
UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(TypeOfExceptionToFailWith.IO_EXCEPTION,TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),RetryPolicies.failoverOnNetworkException(1));
assertEquals("impl1",unreliable.succeedsOnceThenFailsReturningString());
try {
unreliable.succeedsOnceThenFailsReturningString();
fail("should not have succeeded twice");
}
catch ( IOException e) {
// No failover for the non-idempotent method: still impl1.
assertEquals("impl1",e.getMessage());
}
// The idempotent method may be safely retried on the other proxy.
assertEquals("impl1",unreliable.succeedsOnceThenFailsReturningStringIdempotent());
assertEquals("impl2",unreliable.succeedsOnceThenFailsReturningStringIdempotent());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that concurrent failed method invocations only result in a single
 * failover.
 */
@Test public void testConcurrentMethodFailures() throws InterruptedException {
// impl1 fails with StandbyException twice (synchronized implementation,
// so both threads pass through the same failure window) before impl2
// serves both callers.
FlipFlopProxyProvider proxyProvider=new FlipFlopProxyProvider(UnreliableInterface.class,new SynchronizedUnreliableImplementation("impl1",TypeOfExceptionToFailWith.STANDBY_EXCEPTION,2),new UnreliableImplementation("impl2",TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,proxyProvider,RetryPolicies.failoverOnNetworkException(10));
ConcurrentMethodThread t1=new ConcurrentMethodThread(unreliable);
ConcurrentMethodThread t2=new ConcurrentMethodThread(unreliable);
t1.start();
t2.start();
t1.join();
t2.join();
// Both threads end up on impl2, but the provider must have recorded
// exactly one failover despite two concurrent failures.
assertEquals("impl2",t1.result);
assertEquals("impl2",t2.result);
assertEquals(1,proxyProvider.getFailoversOccurred());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Ensure that when all configured services are throwing StandbyException
 * that we fail over back and forth between them until one is no longer
 * throwing StandbyException.
 */
@Test public void testFailoverBetweenMultipleStandbys() throws UnreliableException, StandbyException, IOException {
final long millisToSleep=10000;
// Both proxies start out as standbys that always throw.
final UnreliableImplementation impl1=new UnreliableImplementation("impl1",TypeOfExceptionToFailWith.STANDBY_EXCEPTION);
FlipFlopProxyProvider proxyProvider=new FlipFlopProxyProvider(UnreliableInterface.class,impl1,new UnreliableImplementation("impl2",TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,proxyProvider,RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,10,1000,10000));
// After ~10s a background thread gives impl1 the identifier the call
// below is waiting for, so the ping-ponging retries can finally succeed.
new Thread(){
@Override public void run(){
ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep);
impl1.setIdentifier("renamed-impl1");
}
}
.start();
String result=unreliable.failsIfIdentifierDoesntMatch("renamed-impl1");
assertEquals("renamed-impl1",result);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Interrupting a thread that is sleeping inside the retry proxy must
 * surface as an InterruptedException (wrapped in an
 * UndeclaredThrowableException) rather than being swallowed.
 *
 * Fixes versus the previous version: raw AtomicReference / Future /
 * Callable types are generified, and the single-thread executor is
 * shut down so its worker thread is not leaked.
 */
@Test public void testRetryInterruptible() throws Throwable {
  final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,unreliableImpl,retryUpToMaximumTimeWithFixedSleep(10,10,TimeUnit.SECONDS));
  final CountDownLatch latch=new CountDownLatch(1);
  final AtomicReference<Thread> futureThread=new AtomicReference<Thread>();
  ExecutorService exec=Executors.newSingleThreadExecutor();
  try {
    Future<Throwable> future=exec.submit(new Callable<Throwable>(){
      @Override public Throwable call() throws Exception {
        futureThread.set(Thread.currentThread());
        latch.countDown();
        try {
          unreliable.alwaysFailsWithFatalException();
        }
        catch ( UndeclaredThrowableException ute) {
          return ute.getCause();
        }
        return null;
      }
    });
    latch.await();
    // Give the worker time to enter the retry policy's sleep.
    Thread.sleep(1000);
    assertTrue(futureThread.get().isAlive());
    futureThread.get().interrupt();
    Throwable e=future.get(1,TimeUnit.SECONDS);
    assertNotNull(e);
    assertEquals(InterruptedException.class,e.getClass());
    assertEquals("sleep interrupted",e.getMessage());
  }
  finally {
    // Previously leaked: the executor's worker thread outlived the test.
    exec.shutdownNow();
  }
}
InternalCallVerifier NullVerifier
/**
 * The factory must supply a serializer for Writable and none for an
 * unregistered type.
 */
@Test public void testGetSerializer(){
  Object writableSerializer=factory.getSerializer(Writable.class);
  assertNotNull("A valid class must be returned for default Writable SerDe",writableSerializer);
  Object unknownSerializer=factory.getSerializer(TestSerializationFactory.class);
  assertNull("A null should be returned if there are no serializers found.",unknownSerializer);
}
InternalCallVerifier NullVerifier
/**
 * The factory must supply a deserializer for Writable and none for an
 * unregistered type.
 */
@Test public void testGetDeserializer(){
  Object writableDeserializer=factory.getDeserializer(Writable.class);
  assertNotNull("A valid class must be returned for default Writable SerDe",writableDeserializer);
  Object unknownDeserializer=factory.getDeserializer(TestSerializationFactory.class);
  assertNull("A null should be returned if there are no deserializers found",unknownDeserializer);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A GenericWritable subclass carrying a Configurable payload must
 * round-trip through serialization and hand its Configuration to the
 * deserialized instance.
 */
@Test public void testWritableConfigurable() throws Exception {
  conf.set(CONF_TEST_KEY,CONF_TEST_VALUE);
  FooGenericWritable wrapper=new FooGenericWritable();
  wrapper.setConf(conf);
  Baz payload=new Baz();
  wrapper.set(payload);
  Baz roundTripped=SerializationTestUtil.testSerialization(conf,payload);
  assertEquals(payload,roundTripped);
  // The deserialized object must have received a Configuration.
  assertNotNull(roundTripped.getConf());
}
InternalCallVerifier EqualityVerifier
/**
 * Serializes a TestWC through the JavaSerialization Serializer and
 * reads it back with the matching Deserializer, expecting an object
 * equal to the original.
 */
@Test @SuppressWarnings({"rawtypes","unchecked"}) public void testWritableComparatorJavaSerialization() throws Exception {
Serialization ser=new JavaSerialization();
Serializer serializer=ser.getSerializer(TestWC.class);
DataOutputBuffer dob=new DataOutputBuffer();
serializer.open(dob);
TestWC orig=new TestWC(0);
serializer.serialize(orig);
serializer.close();
// Deserialize from the exact bytes just written.
Deserializer deserializer=ser.getDeserializer(TestWC.class);
DataInputBuffer dib=new DataInputBuffer();
dib.reset(dob.getData(),0,dob.getLength());
deserializer.open(dib);
TestWC deser=deserializer.deserialize(null);
deserializer.close();
assertEquals(orig,deser);
}
InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Swaps the underlying call queue repeatedly while 50 producers and 20
 * consumers hammer it, then verifies no calls were lost: the queue
 * drains and everything produced was eventually consumed.
 *
 * Fix versus the previous version: the raw ArrayList / HashMap types
 * are generified so the compiler checks element types.
 */
@Test(timeout=60000) public void testSwapUnderContention() throws InterruptedException {
  manager=new CallQueueManager(queueClass,5000,"",null);
  ArrayList<Putter> producers=new ArrayList<Putter>();
  ArrayList<Taker> consumers=new ArrayList<Taker>();
  HashMap<Runnable,Thread> threads=new HashMap<Runnable,Thread>();
  for (int i=0; i < 50; i++) {
    Putter p=new Putter(manager,-1,-1);
    Thread pt=new Thread(p);
    producers.add(p);
    threads.put(p,pt);
    pt.start();
  }
  for (int i=0; i < 20; i++) {
    Taker t=new Taker(manager,-1,-1);
    Thread tt=new Thread(t);
    consumers.add(t);
    threads.put(t,tt);
    tt.start();
  }
  Thread.sleep(10);
  // Swap the queue several times while traffic is in flight.
  for (int i=0; i < 5; i++) {
    manager.swapQueue(queueClass,5000,"",null);
  }
  for ( Putter p : producers) {
    p.stop();
  }
  // Give the consumers time to drain whatever remains.
  Thread.sleep(2000);
  assertEquals(0,manager.size());
  long totalCallsCreated=0;
  for ( Putter p : producers) {
    threads.get(p).interrupt();
  }
  for ( Putter p : producers) {
    threads.get(p).join();
    totalCallsCreated+=p.callsAdded;
  }
  long totalCallsConsumed=0;
  for ( Taker t : consumers) {
    threads.get(t).interrupt();
  }
  for ( Taker t : consumers) {
    threads.get(t).join();
    totalCallsConsumed+=t.callsTaken;
  }
  // Nothing produced may have been dropped across the swaps.
  assertEquals(totalCallsConsumed,totalCallsCreated);
}
InternalCallVerifier EqualityVerifier
/**
 * The decay period falls back to the documented default when unset
 * and honors a namespaced override when one is present.
 */
@Test public void testParsePeriod(){
  // No period configured: default applies.
  scheduler=new DecayRpcScheduler(1,"",new Configuration());
  assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT,scheduler.getDecayPeriodMillis());
  // Namespaced override is picked up.
  Configuration namespaced=new Configuration();
  namespaced.setLong("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,1058);
  scheduler=new DecayRpcScheduler(1,"ns",namespaced);
  assertEquals(1058L,scheduler.getDecayPeriodMillis());
}
InternalCallVerifier EqualityVerifier
/**
 * With an effectively infinite period (so only forced decays run) and
 * factor 0.5, each forceDecay() should halve every identity's count
 * (integer division) and drop identities that reach zero; four decays
 * empty the scheduler completely.
 */
@Test public void testDecay(){
Configuration conf=new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,"999999999");
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,"0.5");
scheduler=new DecayRpcScheduler(1,"ns",conf);
assertEquals(0,scheduler.getTotalCallSnapshot());
// Seed counts: A=4, B=8, total 12.
for (int i=0; i < 4; i++) {
scheduler.getPriorityLevel(mockCall("A"));
}
for (int i=0; i < 8; i++) {
scheduler.getPriorityLevel(mockCall("B"));
}
assertEquals(12,scheduler.getTotalCallSnapshot());
assertEquals(4,scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(8,scheduler.getCallCountSnapshot().get("B").longValue());
// Decay 1: 12 -> 6 (A=2, B=4).
scheduler.forceDecay();
assertEquals(6,scheduler.getTotalCallSnapshot());
assertEquals(2,scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(4,scheduler.getCallCountSnapshot().get("B").longValue());
// Decay 2: 6 -> 3 (A=1, B=2).
scheduler.forceDecay();
assertEquals(3,scheduler.getTotalCallSnapshot());
assertEquals(1,scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(2,scheduler.getCallCountSnapshot().get("B").longValue());
// Decay 3: A halves to 0 and is evicted from the snapshot.
scheduler.forceDecay();
assertEquals(1,scheduler.getTotalCallSnapshot());
assertEquals(null,scheduler.getCallCountSnapshot().get("A"));
assertEquals(1,scheduler.getCallCountSnapshot().get("B").longValue());
// Decay 4: B reaches 0 too; the scheduler is empty.
scheduler.forceDecay();
assertEquals(0,scheduler.getTotalCallSnapshot());
assertEquals(null,scheduler.getCallCountSnapshot().get("A"));
assertEquals(null,scheduler.getCallCountSnapshot().get("B"));
}
InternalCallVerifier EqualityVerifier
/**
 * With a short (10ms) decay period and factor 0.5, the scheduler's periodic
 * decay must drive the total call count back to zero on its own; the @Test
 * timeout bounds the wait.
 */
@Test(timeout=2000)
public void testPeriodic() throws InterruptedException {
  final Configuration conf = new Configuration();
  conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "10");
  conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.5");
  scheduler = new DecayRpcScheduler(1, "ns", conf);

  assertEquals(10, scheduler.getDecayPeriodMillis());
  assertEquals(0, scheduler.getTotalCallSnapshot());

  // Record 64 calls from a single caller ...
  int issued = 0;
  while (issued < 64) {
    scheduler.getPriorityLevel(mockCall("A"));
    issued++;
  }
  // ... then poll until the background decay has zeroed them out.
  while (scheduler.getTotalCallSnapshot() > 0) {
    Thread.sleep(10);
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Per-caller counts accumulate across calls, and reading a snapshot does not
 * perturb the underlying counts (two successive reads agree).
 */
@Test
public void testAccumulate() {
  final Configuration conf = new Configuration();
  // Effectively disable periodic decay so counts only ever grow.
  conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "99999999");
  scheduler = new DecayRpcScheduler(1, "ns", conf);

  // Nothing tracked yet.
  assertEquals(0, scheduler.getCallCountSnapshot().size());

  scheduler.getPriorityLevel(mockCall("A"));
  // Snapshot reads are idempotent: same count both times.
  assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
  assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());

  scheduler.getPriorityLevel(mockCall("A"));
  scheduler.getPriorityLevel(mockCall("B"));
  scheduler.getPriorityLevel(mockCall("A"));

  assertEquals(3, scheduler.getCallCountSnapshot().get("A").longValue());
  assertEquals(1, scheduler.getCallCountSnapshot().get("B").longValue());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies priority assignment with 4 levels and thresholds "25, 50, 75":
 * a caller's assigned level rises with its accumulated share of observed
 * traffic, and falls again as other callers dilute that share. The exact
 * expected levels below encode DecayRpcScheduler's threshold mapping.
 */
@Test public void testPriority(){
Configuration conf=new Configuration();
// Disable periodic decay; shares evolve only through the calls below.
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,"99999999");
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY,"25, 50, 75");
scheduler=new DecayRpcScheduler(4,"ns",conf);
// First-ever call is scheduled at the lowest level.
assertEquals(0,scheduler.getPriorityLevel(mockCall("A")));
// "A" now dominates the history, so it is demoted.
assertEquals(2,scheduler.getPriorityLevel(mockCall("A")));
assertEquals(0,scheduler.getPriorityLevel(mockCall("B")));
assertEquals(1,scheduler.getPriorityLevel(mockCall("B")));
assertEquals(0,scheduler.getPriorityLevel(mockCall("C")));
assertEquals(0,scheduler.getPriorityLevel(mockCall("C")));
// "B" and "C" traffic diluted "A"'s share, so "A" sits at level 1 ...
assertEquals(1,scheduler.getPriorityLevel(mockCall("A")));
assertEquals(1,scheduler.getPriorityLevel(mockCall("A")));
assertEquals(1,scheduler.getPriorityLevel(mockCall("A")));
// ... until its renewed burst pushes it back up to level 2.
assertEquals(2,scheduler.getPriorityLevel(mockCall("A")));
}
InternalCallVerifier EqualityVerifier
/**
 * Decay-factor parsing: an unconfigured scheduler uses the library default,
 * while a factor set under the namespace prefix overrides it.
 */
@Test
public void testParseFactor() {
  // Default factor when nothing is configured.
  scheduler = new DecayRpcScheduler(1, "", new Configuration());
  assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT,
      scheduler.getDecayFactor(), 0.00001);

  // Factor configured under the "prefix." namespace.
  final Configuration conf = new Configuration();
  conf.set("prefix." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.125");
  scheduler = new DecayRpcScheduler(1, "prefix", conf);
  assertEquals(0.125, scheduler.getDecayFactor(), 0.00001);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000)
public void testCallRetryCount() throws IOException {
  final int retryCount = 255;
  final Client client = new Client(LongWritable.class, conf);
  // Pin the retry count carried by every subsequent call from this thread.
  Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);
  final TestServer server = new TestServer(1, false);
  // Server side: every handled call must observe the client's retry count.
  server.callListener = new Runnable() {
    @Override
    public void run() {
      Assert.assertEquals(retryCount, Server.getCallRetryCount());
    }
  };
  try {
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    // Any server-side assertion failure surfaces as caller.failed.
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the server's idle-connection scanner: once client connections go
 * idle, each cleanup pass may close at most {@code killMax} of them, and the
 * one connection with a call still in progress is retained until that call
 * completes.
 */
@Test(timeout=30000)
public void testConnectionIdleTimeouts() throws Exception {
  ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
  final int maxIdle = 1000;
  final int cleanupInterval = maxIdle * 3 / 4; // stagger scans against maxIdle
  final int killMax = 3;
  final int clients = 1 + killMax * 2;
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, maxIdle);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY, 0);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY, killMax);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY, cleanupInterval);

  // The first call blocks on its own barrier so its connection stays busy;
  // all the other calls block together on callBarrier.
  final CyclicBarrier firstCallBarrier = new CyclicBarrier(2);
  final CyclicBarrier callBarrier = new CyclicBarrier(clients);
  final CountDownLatch allCallLatch = new CountDownLatch(clients);
  final AtomicBoolean error = new AtomicBoolean();

  final TestServer server = new TestServer(clients, false);
  Thread[] threads = new Thread[clients];
  try {
    server.callListener = new Runnable() {
      AtomicBoolean first = new AtomicBoolean(true);
      @Override
      public void run() {
        try {
          allCallLatch.countDown();
          if (first.compareAndSet(true, false)) {
            // Hold the first connection's call open until released below.
            firstCallBarrier.await();
          } else {
            callBarrier.await();
          }
        } catch (Throwable t) {
          LOG.error(t);
          error.set(true);
        }
      }
    };
    server.start();

    final CountDownLatch callReturned = new CountDownLatch(clients - 1);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    // Clients use a long max-idle so only the server closes connections.
    final Configuration clientConf = new Configuration();
    clientConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 10000);
    for (int i = 0; i < clients; i++) {
      threads[i] = new Thread(new Runnable() {
        @Override
        public void run() {
          Client client = new Client(LongWritable.class, clientConf);
          try {
            client.call(new LongWritable(Thread.currentThread().getId()),
                addr, null, null, 0, clientConf);
            callReturned.countDown();
            Thread.sleep(10000); // leave the connection idle afterwards
          } catch (IOException e) {
            LOG.error(e);
          } catch (InterruptedException e) {
            // test teardown interrupts us; nothing to do
          }
        }
      });
      threads[i].start();
    }

    // All calls in flight: every connection is open.
    allCallLatch.await();
    assertFalse(error.get());
    assertEquals(clients, server.getNumOpenConnections());

    // Release all but the first call; their connections go idle but have not
    // yet exceeded maxIdle, so they remain open.
    callBarrier.await();
    callReturned.await();
    assertEquals(clients, server.getNumOpenConnections());

    // Each scan closes at most killMax idle connections, so the open count
    // steps down by killMax per cleanup interval until only the busy first
    // connection remains.
    Thread.sleep(maxIdle * 2 - cleanupInterval);
    for (int i = clients; i > 1; i -= killMax) {
      Thread.sleep(cleanupInterval);
      assertFalse(error.get());
      assertEquals(i, server.getNumOpenConnections());
    }
    Thread.sleep(cleanupInterval);
    assertFalse(error.get());
    assertEquals(1, server.getNumOpenConnections());

    // Let the first call finish; its connection must then idle out as well.
    firstCallBarrier.await();
    Thread.sleep(maxIdle * 2);
    assertFalse(error.get());
    assertEquals(0, server.getNumOpenConnections());
  } finally {
    for (Thread t : threads) {
      if (t != null) {
        t.interrupt();
        t.join();
      }
    }
    // BUG FIX: stop the server exactly once, after all client threads have
    // been joined. The previous code invoked server.stop() inside the
    // per-thread loop, i.e. once per thread.
    server.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000) public void testCallIdAndRetry() throws IOException {
final CallInfo info=new CallInfo();
// Client subclass that records the id/retry of each call it creates and
// cross-checks them against the server's response header.
final Client client=new Client(LongWritable.class,conf){
@Override Call createCall( RpcKind rpcKind, Writable rpcRequest){
final Call call=super.createCall(rpcKind,rpcRequest);
// Capture this call's id/retry for the comparisons below.
info.id=call.id;
info.retry=call.retry;
return call;
}
@Override void checkResponse( RpcResponseHeaderProto header) throws IOException {
super.checkResponse(header);
// (2) The server must echo back the same call id and retry count.
Assert.assertEquals(info.id,header.getCallId());
Assert.assertEquals(info.retry,header.getRetryCount());
}
}
;
final TestServer server=new TestServer(1,false);
// (1) While handling a call, the server-side accessors must report the
// client's call id and retry count.
server.callListener=new Runnable(){
@Override public void run(){
Assert.assertEquals(info.id,Server.getCallId());
Assert.assertEquals(info.retry,Server.getCallRetryCount());
}
}
;
try {
InetSocketAddress addr=NetUtils.getConnectAddress(server);
server.start();
// Issue serial calls; assertion failures above surface as caller.failed.
final SerialCaller caller=new SerialCaller(client,addr,10);
caller.run();
assertFalse(caller.failed);
}
finally {
client.stop();
server.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if the rpc server gets the default retry count (0) from client.
 */
@Test(timeout=60000)
public void testInitialCallRetryCount() throws IOException {
  final Client client = new Client(LongWritable.class, conf);
  final TestServer server = new TestServer(1, false);
  // No explicit Client.setCallIdAndRetryCount here, so the server must see
  // a retry count of zero on every call.
  server.callListener = new Runnable() {
    @Override
    public void run() {
      Assert.assertEquals(0, Server.getCallRetryCount());
    }
  };
  try {
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * UserIdentityProvider must identify a schedulable by the current UGI
 * user's name.
 */
@Test
public void testUserIdentityProvider() throws IOException {
  final UserIdentityProvider provider = new UserIdentityProvider();
  final String identity = provider.makeIdentity(new FakeSchedulable());
  // Expected identity: the name of the currently logged-in user.
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  assertEquals(ugi.getUserName(), identity);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * The identity provider is pluggable through configuration: configuring
 * UserIdentityProvider by class name must yield exactly one non-null
 * instance of that exact class.
 */
@Test
public void testPluggableIdentityProvider() {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      "org.apache.hadoop.ipc.UserIdentityProvider");
  // FIX: restore the lost type argument — with the raw "List" the
  // assignment from get(0) below does not compile.
  List<IdentityProvider> providers = conf.getInstances(
      CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
      IdentityProvider.class);
  assertTrue(providers.size() == 1);
  IdentityProvider ip = providers.get(0);
  assertNotNull(ip);
  assertEquals(ip.getClass(), UserIdentityProvider.class);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Check that we can reach a NameNode or Resource Manager using a specific
 * socket factory
 */
@Test public void testSocketFactory() throws IOException {
// Mini DFS cluster plus a direct client using the default socket factory.
Configuration sconf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(sconf).numDataNodes(1).build();
final int nameNodePort=cluster.getNameNodePort();
FileSystem fs=cluster.getFileSystem();
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem directDfs=(DistributedFileSystem)fs;
// A second client configured to use the custom socket factory.
Configuration cconf=getCustomSocketConfigs(nameNodePort);
fs=FileSystem.get(cconf);
Assert.assertTrue(fs instanceof DistributedFileSystem);
DistributedFileSystem dfs=(DistributedFileSystem)fs;
JobClient client=null;
MiniMRYarnCluster miniMRYarnCluster=null;
try {
// Both clients must see the same namespace: a directory created through
// the direct client is visible through the custom-factory client.
Path filePath=new Path("/dir");
Assert.assertFalse(directDfs.exists(filePath));
Assert.assertFalse(dfs.exists(filePath));
directDfs.mkdirs(filePath);
Assert.assertTrue(directDfs.exists(filePath));
Assert.assertTrue(dfs.exists(filePath));
// RM side: bring up a MiniMR YARN cluster ...
fs=FileSystem.get(sconf);
JobConf jobConf=new JobConf();
FileSystem.setDefaultUri(jobConf,fs.getUri().toString());
miniMRYarnCluster=initAndStartMiniMRYarnCluster(jobConf);
// ... and reach it through DummySocketFactory. The configured RM address
// is offset by 10; NOTE(review): presumably DummySocketFactory
// compensates for this offset — confirm against its implementation.
JobConf jconf=new JobConf(miniMRYarnCluster.getConfig());
jconf.set("hadoop.rpc.socket.factory.class.default","org.apache.hadoop.ipc.DummySocketFactory");
jconf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
String rmAddress=jconf.get("yarn.resourcemanager.address");
String[] split=rmAddress.split(":");
jconf.set("yarn.resourcemanager.address",split[0] + ':' + (Integer.parseInt(split[1]) + 10));
client=new JobClient(jconf);
// A fresh cluster has no jobs pending completion.
JobStatus[] jobs=client.jobsToComplete();
Assert.assertTrue(jobs.length == 0);
}
finally {
// Tear down every resource, in reverse dependency order.
closeClient(client);
closeDfs(dfs);
closeDfs(directDfs);
stopMiniMRYarnCluster(miniMRYarnCluster);
shutdownDFSCluster(cluster);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void test1() throws IOException {
ProtocolProxy> proxy;
proxy=RPC.getProtocolProxy(Foo0.class,Foo0.versionID,addr,conf);
Foo0 foo0=(Foo0)proxy.getProxy();
Assert.assertEquals("Foo0",foo0.ping());
proxy=RPC.getProtocolProxy(Foo1.class,Foo1.versionID,addr,conf);
Foo1 foo1=(Foo1)proxy.getProxy();
Assert.assertEquals("Foo1",foo1.ping());
Assert.assertEquals("Foo1",foo1.ping());
proxy=RPC.getProtocolProxy(Bar.class,Foo1.versionID,addr,conf);
Bar bar=(Bar)proxy.getProxy();
Assert.assertEquals(99,bar.echo(99));
Mixin mixin=bar;
mixin.hello();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* getProtocolVersion of an unimplemented version should return highest version
* Similarly getProtocolSignature should work.
* @throws IOException
*/
@Test public void testNonExistingProtocol2() throws IOException {
ProtocolProxy> proxy;
proxy=RPC.getProtocolProxy(FooUnimplemented.class,FooUnimplemented.versionID,addr,conf);
FooUnimplemented foo=(FooUnimplemented)proxy.getProxy();
Assert.assertEquals(Foo1.versionID,foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),FooUnimplemented.versionID));
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),FooUnimplemented.versionID,0);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * echo2 round-trips a short payload intact, while a 4096-character payload
 * must be rejected with a ServiceException.
 */
@Test(timeout=6000)
public void testExtraLongRpc() throws Exception {
  final TestRpcService2 client = getClient2();

  // A short message round-trips unchanged.
  final String shortString = StringUtils.repeat("X", 4);
  EchoRequestProto echoRequest =
      EchoRequestProto.newBuilder().setMessage(shortString).build();
  EchoResponseProto echoResponse = client.echo2(null, echoRequest);
  Assert.assertEquals(shortString, echoResponse.getMessage());

  // An extra-long message must fail.
  final String longString = StringUtils.repeat("X", 4096);
  echoRequest = EchoRequestProto.newBuilder().setMessage(longString).build();
  try {
    echoResponse = client.echo2(null, echoRequest);
    Assert.fail("expected extra-long RPC to fail");
  } catch (ServiceException se) {
    // Expected: oversized call refused.
  }
}
InternalCallVerifier BooleanVerifier
/**
 * error2 must fail with a ServiceException whose cause is a RemoteException
 * carrying the original exception's class name, message, and the
 * ERROR_APPLICATION error code.
 */
@Test(timeout=5000)
public void testProtoBufRandomException() throws Exception {
  TestRpcService client = getClient();
  EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
  try {
    client.error2(null, emptyRequest);
    // FIX: previously the test silently passed when no exception was
    // thrown at all; the thrown AssertionError is not a ServiceException,
    // so it is not swallowed by the catch below.
    Assert.fail("expected error2 to throw a ServiceException");
  } catch (ServiceException se) {
    Assert.assertTrue(se.getCause() instanceof RemoteException);
    RemoteException re = (RemoteException) se.getCause();
    Assert.assertTrue(re.getClassName().equals(URISyntaxException.class.getName()));
    Assert.assertTrue(re.getMessage().contains("testException"));
    Assert.assertTrue(re.getErrorCode().equals(RpcErrorCodeProto.ERROR_APPLICATION));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * End-to-end check of the second protobuf service: ping2 and echo2 succeed,
 * and both the aggregate RPC metrics and the per-method detailed metric for
 * echo2 are incremented.
 */
@Test(timeout=5000)
public void testProtoBufRpc2() throws Exception {
  final TestRpcService2 client = getClient2();

  // Plain ping.
  final EmptyRequestProto emptyRequest = EmptyRequestProto.newBuilder().build();
  client.ping2(null, emptyRequest);

  // Echo round-trip.
  final EchoRequestProto echoRequest =
      EchoRequestProto.newBuilder().setMessage("hello").build();
  final EchoResponseProto echoResponse = client.echo2(null, echoRequest);
  Assert.assertEquals(echoResponse.getMessage(), "hello");

  // The calls above must have bumped the aggregate RPC metrics ...
  MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
  assertCounterGt("RpcQueueTimeNumOps", 0L, rpcMetrics);
  assertCounterGt("RpcProcessingTimeNumOps", 0L, rpcMetrics);
  // ... and the per-method detailed counter for echo2.
  MetricsRecordBuilder rpcDetailedMetrics =
      getMetrics(server.getRpcDetailedMetrics().name());
  assertCounterGt("Echo2NumOps", 0L, rpcDetailedMetrics);
}
InternalCallVerifier BooleanVerifier
/**
 * An insecure (simple-auth) client calling a Kerberos-configured server
 * must be rejected with a RemoteException unwrapping to
 * AccessControlException. The scenario runs twice: against a default server
 * and against a second server instance.
 */
@Test public void testErrorMsgForInsecureClient() throws IOException {
// Server side: require Kerberos authentication.
Configuration serverConf=new Configuration(conf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,serverConf);
UserGroupInformation.setConfiguration(serverConf);
final Server server=new RPC.Builder(serverConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
// Client side: switch UGI back to the insecure configuration.
UserGroupInformation.setConfiguration(conf);
boolean succeeded=false;
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestProtocol proxy=null;
try {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
proxy.echo("");
}
catch ( RemoteException e) {
// Expected: the server rejects the insecure client.
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
succeeded=true;
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
assertTrue(succeeded);
// Second round, apparently aiming at the multi-reader code path.
// NOTE(review): serverConf was copied from conf BEFORE this setInt, so
// the 2-reader setting may not actually reach multiServer — confirm
// whether serverConf was intended here.
conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,2);
UserGroupInformation.setConfiguration(serverConf);
final Server multiServer=new RPC.Builder(serverConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
multiServer.start();
succeeded=false;
final InetSocketAddress mulitServerAddr=NetUtils.getConnectAddress(multiServer);
proxy=null;
try {
// Insecure client again, against the second server.
UserGroupInformation.setConfiguration(conf);
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,mulitServerAddr,conf);
proxy.echo("");
}
catch ( RemoteException e) {
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
succeeded=true;
}
finally {
multiServer.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
assertTrue(succeeded);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * RPC.stopProxy must invoke close() on the proxy's invocation handler
 * exactly once.
 */
@Test
public void testStopProxy() throws IOException {
  final StoppedProtocol proxy =
      RPC.getProxy(StoppedProtocol.class, StoppedProtocol.versionID, null, conf);
  final StoppedInvocationHandler handler =
      (StoppedInvocationHandler) Proxy.getInvocationHandler(proxy);
  // Not closed yet ...
  assertEquals(0, handler.getCloseCalled());
  RPC.stopProxy(proxy);
  // ... closed exactly once afterwards.
  assertEquals(1, handler.getCloseCalled());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With quantile metrics enabled, a burst of ping/echo traffic must produce
 * non-zero queue/processing counters and populate the per-interval quantile
 * gauges.
 */
@Test
public void testRpcMetrics() throws Exception {
  final Configuration configuration = new Configuration();
  final int interval = 1;
  configuration.setBoolean(CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, true);
  configuration.set(CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  final Server server = new RPC.Builder(configuration)
      .setProtocol(TestProtocol.class).setInstance(new TestImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
      .setVerbose(true).build();
  server.start();
  final TestProtocol proxy = RPC.getProxy(TestProtocol.class,
      TestProtocol.versionID, server.getListenerAddress(), configuration);
  try {
    // Generate enough traffic for the timing counters to register.
    for (int i = 0; i < 1000; i++) {
      proxy.ping();
      proxy.echo("" + i);
    }
    final MetricsRecordBuilder rpcMetrics = getMetrics(server.getRpcMetrics().name());
    assertTrue("Expected non-zero rpc queue time",
        getLongCounter("RpcQueueTimeNumOps", rpcMetrics) > 0);
    assertTrue("Expected non-zero rpc processing time",
        getLongCounter("RpcProcessingTimeNumOps", rpcMetrics) > 0);
    MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s", rpcMetrics);
    MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s", rpcMetrics);
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * RPC.getServerAddress on a proxy must report the address the proxy was
 * built against.
 */
@Test
public void testProxyAddress() throws IOException {
  final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
  TestProtocol proxy = null;
  try {
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
    assertEquals(addr, RPC.getServerAddress(proxy));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs a blocking slowPing call on a background thread while issuing fast
 * slowPing(false) calls on the main thread; with 2 handlers the fast calls
 * proceed while the slow one is still in flight, and the slow call
 * eventually completes.
 */
@Test public void testSlowRpc() throws IOException {
System.out.println("Testing Slow RPC");
// Two handlers so fast calls are served while the slow call blocks one.
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
TestProtocol proxy=null;
try {
server.start();
InetSocketAddress addr=NetUtils.getConnectAddress(server);
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
// Launch the slow RPC on its own thread; it must not finish yet.
SlowRPC slowrpc=new SlowRPC(proxy);
Thread thread=new Thread(slowrpc,"SlowRPC");
thread.start();
assertTrue("Slow RPC should not have finished1.",!slowrpc.isDone());
proxy.slowPing(false);
assertTrue("Slow RPC should not have finished2.",!slowrpc.isDone());
proxy.slowPing(false);
// NOTE(review): presumably the fast pings release the slow call (see
// SlowRPC / TestImpl.slowPing) — poll until it reports done.
while (!slowrpc.isDone()) {
System.out.println("Waiting for slow RPC to get done.");
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
System.out.println("Down slow rpc testing");
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * RPC.stopProxy must see through a RetryProxy wrapper and still close the
 * underlying invocation handler exactly once.
 */
@Test
public void testWrappedStopProxy() throws IOException {
  final StoppedProtocol wrappedProxy =
      RPC.getProxy(StoppedProtocol.class, StoppedProtocol.versionID, null, conf);
  final StoppedInvocationHandler handler =
      (StoppedInvocationHandler) Proxy.getInvocationHandler(wrappedProxy);
  // Wrap the plain proxy in a retrying facade.
  final StoppedProtocol proxy = (StoppedProtocol) RetryProxy.create(
      StoppedProtocol.class, wrappedProxy, RetryPolicies.RETRY_FOREVER);
  assertEquals(0, handler.getCloseCalled());
  RPC.stopProxy(proxy);
  assertEquals(1, handler.getCloseCalled());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Server queue size and reader-thread count come from configuration by
 * default and can be overridden through the builder.
 */
@Test
public void testConfRpc() throws IOException {
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(1).setVerbose(false).build();
  // Defaults: both values come straight from the configuration.
  final int confQ = conf.getInt(
      CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,
      CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT);
  assertEquals(confQ, server.getMaxQueueSize());
  final int confReaders = conf.getInt(
      CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,
      CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT);
  assertEquals(confReaders, server.getNumReaders());
  server.stop();

  // Explicit builder overrides take precedence over the configuration.
  server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200)
      .setVerbose(false).build();
  assertEquals(3, server.getNumReaders());
  assertEquals(200, server.getMaxQueueSize());
  server.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
 * the server registry to extract protocol signatures and versions.
 */
@Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
TestImpl1 impl=new TestImpl1();
server=new RPC.Builder(conf).setProtocol(TestProtocol1.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
// Also register the impl under the writable RPC kind.
server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl);
server.start();
ProtocolMetaInfoServerSideTranslatorPB xlator=new ProtocolMetaInfoServerSideTranslatorPB(server);
// Asking for the protobuf kind yields no signatures ...
GetProtocolSignatureResponseProto resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_PROTOCOL_BUFFER));
Assert.assertEquals(0,resp.getProtocolSignatureCount());
// ... while the writable kind yields exactly one, at TestProtocol1's version.
resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_WRITABLE));
Assert.assertEquals(1,resp.getProtocolSignatureCount());
ProtocolSignatureProto sig=resp.getProtocolSignatureList().get(0);
Assert.assertEquals(TestProtocol1.versionID,sig.getVersion());
// The reported method fingerprints must include echo(String).
boolean found=false;
int expected=ProtocolSignature.getFingerprint(TestProtocol1.class.getMethod("echo",String.class));
for ( int m : sig.getMethodsList()) {
if (expected == m) {
found=true;
break;
}
}
Assert.assertTrue(found);
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Calling a v2 server through a v4 proxy must surface RPC.VersionMismatch
 * as a RemoteException with ERROR_RPC_VERSION_MISMATCH.
 */
@Test
public void testVersionMismatch() throws IOException {
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr = NetUtils.getConnectAddress(server);
  final TestProtocol4 proxy =
      RPC.getProxy(TestProtocol4.class, TestProtocol4.versionID, addr, conf);
  try {
    proxy.echo(21);
    fail("The call must throw VersionMismatch exception");
  } catch (RemoteException ex) {
    // Expected path: verify exception identity and error code.
    Assert.assertEquals(RPC.VersionMismatch.class.getName(), ex.getClassName());
    Assert.assertTrue(ex.getErrorCode().equals(RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
  } catch (IOException ex) {
    fail("Expected version mismatch but got " + ex);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A version-2 client against a version-1 server: ping and String echo work,
 * and the v1 implementation returns the int argument unchanged.
 */
@Test
public void testVersion2ClientVersion1Server() throws Exception {
  final TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  final Version2Client client = new Version2Client();
  client.ping();
  assertEquals("hello", client.echo("hello"));
  // v1 server echoes the int as-is.
  assertEquals(3, client.echo(3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * RpcClientUtil.isMethodSupported must report "echo" as available over the
 * writable RPC kind the server serves, and unavailable over protobuf.
 */
@Test
public void testIsMethodSupported() throws IOException {
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr = NetUtils.getConnectAddress(server);
  final TestProtocol2 proxy =
      RPC.getProxy(TestProtocol2.class, TestProtocol2.versionID, addr, conf);
  final long version = RPC.getProtocolVersion(TestProtocol2.class);
  // Supported over the writable engine ...
  boolean supported = RpcClientUtil.isMethodSupported(proxy,
      TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE, version, "echo");
  Assert.assertTrue(supported);
  // ... but not over protobuf.
  supported = RpcClientUtil.isMethodSupported(proxy,
      TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, version, "echo");
  Assert.assertFalse(supported);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A version-2 client against a version-2 server: ping and String echo work,
 * and the v2 implementation returns the negated int argument.
 */
@Test
public void testVersion2ClientVersion2Server() throws Exception {
  final TestImpl2 impl = new TestImpl2();
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  final Version2Client client = new Version2Client();
  client.ping();
  assertEquals("hello", client.echo("hello"));
  // v2 server echoes the negated int.
  assertEquals(-3, client.echo(3));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * ConnectionId.getPingInterval: with client pings enabled the interval is
 * the default ping interval; with pings disabled it is 0.
 */
@Test public void testPingInterval() throws Exception {
Configuration newConf=new Configuration(conf);
newConf.set(SERVER_PRINCIPAL_KEY,SERVER_PRINCIPAL_1);
// NOTE(review): this writes to the shared 'conf', not to 'newConf'
// (which was copied from conf above and is therefore unaffected). The
// assertion below still holds because the value written equals the
// default — confirm whether 'newConf' was intended here.
conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);
// Pings enabled: the interval must be the default.
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,true);
ConnectionId remoteId=ConnectionId.getConnectionId(new InetSocketAddress(0),TestSaslProtocol.class,null,0,newConf);
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,remoteId.getPingInterval());
// Pings disabled: the interval must be 0.
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY,false);
remoteId=ConnectionId.getConnectionId(new InetSocketAddress(0),TestSaslProtocol.class,null,0,newConf);
assertEquals(0,remoteId.getPingInterval());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A secret manager that rejects tokens must surface ERROR_MESSAGE to the
 * client as a RemoteException unwrapping to InvalidToken.
 */
@Test
public void testErrorMessage() throws Exception {
  final BadTokenSecretManager sm = new BadTokenSecretManager();
  final Server server = new RPC.Builder(conf)
      .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
      .setVerbose(true).setSecretManager(sm).build();
  boolean succeeded = false;
  try {
    doDigestRpc(server, sm);
  } catch (RemoteException e) {
    // Expected failure: check message text and wrapped exception type.
    LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
    assertEquals(ERROR_MESSAGE, e.getLocalizedMessage());
    assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
    succeeded = true;
  }
  // The RPC must have failed with the expected error.
  assertTrue(succeeded);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Verifies per-configuration connection caching: proxies built with an
 * identical connection config share one cached connection, while changing
 * the client max-idle time yields a distinct ConnectionId and a second
 * cached connection.
 */
@Test
public void testPerConnectionConf() throws Exception {
  TestTokenSecretManager sm = new TestTokenSecretManager();
  final Server server = new RPC.Builder(conf)
      .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
      .setVerbose(true).setSecretManager(sm).build();
  server.start();
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current.getUserName()));
  Token token = new Token(tokenId, sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);
  Configuration newConf = new Configuration(conf);
  newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
  Client client = null;
  TestSaslProtocol proxy1 = null;
  TestSaslProtocol proxy2 = null;
  TestSaslProtocol proxy3 = null;
  int timeouts[] = {111222, 3333333};
  try {
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
    proxy1 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf);
    proxy1.getAuthMethod();
    client = WritableRpcEngine.getClient(newConf);
    Set conns = client.getConnectionIds();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // Same conf: the second proxy must reuse the cached connection.
    proxy2 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf);
    proxy2.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // Different max-idle time: a distinct connection is created.
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
    proxy3 = RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, newConf);
    proxy3.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 2, conns.size());
    ConnectionId[] connsArray = {
        RPC.getConnectionIdForProxy(proxy1),
        RPC.getConnectionIdForProxy(proxy2),
        RPC.getConnectionIdForProxy(proxy3)};
    assertEquals(connsArray[0], connsArray[1]);
    assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
    assertFalse(connsArray[0].equals(connsArray[2]));
    // BUG FIX: the previous assertNotSame compared two freshly boxed
    // Integers (values outside the Integer cache) and so was vacuously
    // true regardless of the values. The intended check — mirroring the
    // timeouts[0] assertion above — is equality.
    assertEquals(timeouts[1], connsArray[2].getMaxIdleTime());
  } finally {
    server.stop();
    if (client != null) {
      client.getConnectionIds().clear();
    }
    if (proxy1 != null) RPC.stopProxy(proxy1);
    if (proxy2 != null) RPC.stopProxy(proxy2);
    if (proxy3 != null) RPC.stopProxy(proxy3);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * Terse-exception registration: classes added via addTerseExceptions (both
 * the single-argument and vararg forms) report terse; unregistered classes
 * do not.
 */
@Test
public void testExceptionsHandler() {
  final Server.ExceptionsHandler handler = new Server.ExceptionsHandler();
  // Register through both overload shapes.
  handler.addTerseExceptions(IOException.class);
  handler.addTerseExceptions(RpcServerException.class, IpcException.class);

  assertTrue(handler.isTerse(IOException.class));
  assertTrue(handler.isTerse(RpcServerException.class));
  assertTrue(handler.isTerse(IpcException.class));
  // Never registered, hence not terse.
  assertFalse(handler.isTerse(RpcClientException.class));
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * SocketFactory instances must behave as hash-map keys: two factories of
 * different classes occupy distinct entries, and each can be looked up and
 * removed independently.
 */
@Test
public void testSocketFactoryAsKeyInMap() {
  // IDIOM FIX: parameterized map instead of the raw HashMap.
  Map<SocketFactory, Integer> dummyCache = new HashMap<SocketFactory, Integer>();
  int toBeCached1 = 1;
  int toBeCached2 = 2;
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
  final SocketFactory dummySocketFactory = NetUtils.getDefaultSocketFactory(conf);
  dummyCache.put(dummySocketFactory, toBeCached1);
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.net.StandardSocketFactory");
  final SocketFactory defaultSocketFactory = NetUtils.getDefaultSocketFactory(conf);
  dummyCache.put(defaultSocketFactory, toBeCached2);
  Assert.assertEquals("The cache contains two elements", 2, dummyCache.size());
  Assert.assertEquals("Equals of both socket factory shouldn't be same",
      defaultSocketFactory.equals(dummySocketFactory), false);
  // Values 1 and 2 fall in the Integer autobox cache, so identity
  // comparison via assertSame is reliable here.
  assertSame(toBeCached2, dummyCache.remove(defaultSocketFactory));
  dummyCache.put(defaultSocketFactory, toBeCached2);
  assertSame(toBeCached1, dummyCache.remove(dummySocketFactory));
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Custom weights drive the multiplexer's schedule: equal weights alternate
 * strictly, and weights 1/3/2 produce the repeating pattern 0,1,1,1,2,2.
 */
@Test
public void testCustomPattern() {
  final Configuration conf = new Configuration();

  // Two handlers with equal weight alternate strictly.
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY, "1", "1");
  mux = new WeightedRoundRobinMultiplexer(2, "test.custom", conf);
  for (int expected : new int[] {0, 1, 0, 1}) {
    assertEquals(mux.getAndAdvanceCurrentIndex(), expected);
  }

  // Weights 1/3/2 yield 0,1,1,1,2,2 on every cycle.
  conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY, "1", "3", "2");
  mux = new WeightedRoundRobinMultiplexer(3, "test.custom", conf);
  for (int cycle = 0; cycle < 5; cycle++) {
    for (int expected : new int[] {0, 1, 1, 1, 2, 2}) {
      assertEquals(mux.getAndAdvanceCurrentIndex(), expected);
    }
  }
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies the built-in default weighting of WeightedRoundRobinMultiplexer
 * when no weights are configured: queue index i gets a share that halves as i
 * grows (e.g. 3 queues drain as 0,0,0,0,1,1,2 before wrapping back to 0).
 */
@Test public void testDefaultPattern(){
  // Single queue: every request goes to index 0.
  mux=new WeightedRoundRobinMultiplexer(1,"",new Configuration());
  for (int i=0; i < 10; i++) {
    assertEquals(0,mux.getAndAdvanceCurrentIndex());
  }
  // Two queues: two full cycles of 0,0,1.
  mux=new WeightedRoundRobinMultiplexer(2,"",new Configuration());
  assertDefaultDrainOrder(0,0,1,0,0,1);
  // Three queues: one full cycle plus the wrap-around back to 0.
  mux=new WeightedRoundRobinMultiplexer(3,"",new Configuration());
  assertDefaultDrainOrder(0,0,0,0,1,1,2,0);
  // Four queues: one full cycle plus the wrap-around back to 0.
  mux=new WeightedRoundRobinMultiplexer(4,"",new Configuration());
  assertDefaultDrainOrder(0,0,0,0,0,0,0,0,1,1,1,1,2,2,3,0);
}
/**
 * Asserts that successive calls to getAndAdvanceCurrentIndex() on {@code mux}
 * yield exactly the given sequence of queue indices.
 */
private void assertDefaultDrainOrder(int... expected){
  for (int idx : expected) {
    assertEquals(idx,mux.getAndAdvanceCurrentIndex());
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A RunnableCallable wrapping a Callable must invoke the wrapped object from
 * both run() and call(), and toString() must report the wrapped class name.
 */
@Test public void callable() throws Exception {
  C c=new C();
  RunnableCallable rc=new RunnableCallable(c);
  rc.run();
  assertTrue(c.RUN);
  // Fresh instance: call() must also trigger the wrapped callable.
  c=new C();
  rc=new RunnableCallable(c);
  rc.call();
  assertTrue(c.RUN);
  // Expected value first per JUnit convention.
  assertEquals("C",rc.toString());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A RunnableCallable wrapping a Runnable must invoke the wrapped object from
 * both run() and call(), and toString() must report the wrapped class name.
 */
@Test public void runnable() throws Exception {
  R r=new R();
  RunnableCallable rc=new RunnableCallable(r);
  rc.run();
  assertTrue(r.RUN);
  // Fresh instance: call() must also trigger the wrapped runnable.
  r=new R();
  rc=new RunnableCallable(r);
  rc.call();
  assertTrue(r.RUN);
  // Expected value first per JUnit convention.
  assertEquals("R",rc.toString());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises all XException constructors: template-only, message argument,
 * Throwable argument, and the copy constructor taking another XException.
 */
@Test public void testXException() throws Exception {
  // Template-only: message keeps the unresolved {0} placeholder, no cause.
  XException ex=new XException(TestERROR.TC);
  assertEquals(TestERROR.TC,ex.getError());
  assertEquals("TC: {0}",ex.getMessage());
  assertNull(ex.getCause());
  // String argument is substituted into the message template.
  ex=new XException(TestERROR.TC,"msg");
  assertEquals(TestERROR.TC,ex.getError());
  assertEquals("TC: msg",ex.getMessage());
  assertNull(ex.getCause());
  // Throwable argument becomes both the message payload and the cause.
  Exception cause=new Exception();
  ex=new XException(TestERROR.TC,cause);
  assertEquals(TestERROR.TC,ex.getError());
  assertEquals("TC: " + cause.toString(),ex.getMessage());
  assertEquals(cause,ex.getCause());
  // Copy constructor: error, message and cause are all propagated.
  XException xcause=ex;
  ex=new XException(xcause);
  assertEquals(TestERROR.TC,ex.getError());
  assertEquals(xcause.getMessage(),ex.getMessage());
  assertEquals(xcause,ex.getCause());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies BaseService defaults (no interface, empty dependencies) and that
 * init() loads only the configuration entries under the service's own prefix.
 */
@Test public void baseService() throws Exception {
  BaseService service=new MyService();
  assertNull(service.getInterface());
  assertEquals("myservice",service.getPrefix());
  assertEquals(0,service.getServiceDependencies().length);
  Server server=Mockito.mock(Server.class);
  Configuration conf=new Configuration(false);
  conf.set("server.myservice.foo","FOO");
  // Different prefix ("myservice1"): must be excluded from the service config.
  conf.set("server.myservice1.bar","BAR");
  Mockito.when(server.getConfig()).thenReturn(conf);
  Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo");
  Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice.");
  service.init(server);
  assertEquals("server.myservice.foo",service.getPrefixedName("foo"));
  // Only the "foo" entry belongs to this service.
  assertEquals(1,service.getServiceConfig().size());
  assertEquals("FOO",service.getServiceConfig().get("foo"));
  assertTrue(MyService.INIT);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the Server status transitions UNDEF -> NORMAL (after init) ->
 * SHUTDOWN (after destroy), and that a configured service is retrievable.
 */
@Test @TestDir public void lifeCycle() throws Exception {
  Configuration conf=new Configuration(false);
  conf.set("server.services",LifeCycleService.class.getName());
  Server server=createServer(conf);
  assertEquals(Server.Status.UNDEF,server.getStatus());
  server.init();
  assertNotNull(server.get(LifeCycleService.class));
  assertEquals(Server.Status.NORMAL,server.getStatus());
  server.destroy();
  assertEquals(Server.Status.SHUTDOWN,server.getStatus());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a service receives every lifecycle callback, in order:
 * init, postInit, serverStatusChange (on destroy), destroy.
 */
@Test @TestDir public void serviceLifeCycle() throws Exception {
  TestService.LIFECYCLE.clear();
  Configuration conf=new Configuration(false);
  conf.set("server.services",TestService.class.getName());
  Server server=createServer(conf);
  server.init();
  assertNotNull(server.get(TestService.class));
  server.destroy();
  // Expected callback sequence first per JUnit convention.
  assertEquals(Arrays.asList("init","postInit","serverStatusChange","destroy"),TestService.LIFECYCLE);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises all four Server constructors and checks each getter reflects the
 * constructor arguments; the home-dir-only constructors derive conf/log/temp
 * sub-directories, and getConfig() is null when no Configuration was given.
 */
@Test @TestDir public void constructorsGetters() throws Exception {
  // Explicit directories plus a Configuration.
  Server server=new Server("server",getAbsolutePath("/a"),getAbsolutePath("/b"),getAbsolutePath("/c"),getAbsolutePath("/d"),new Configuration(false));
  assertEquals(getAbsolutePath("/a"),server.getHomeDir());
  assertEquals(getAbsolutePath("/b"),server.getConfigDir());
  assertEquals(getAbsolutePath("/c"),server.getLogDir());
  assertEquals(getAbsolutePath("/d"),server.getTempDir());
  assertEquals("server",server.getName());
  assertEquals("server",server.getPrefix());
  assertEquals("server.name",server.getPrefixedName("name"));
  assertNotNull(server.getConfig());
  // Explicit directories, no Configuration: getConfig() must be null.
  server=new Server("server",getAbsolutePath("/a"),getAbsolutePath("/b"),getAbsolutePath("/c"),getAbsolutePath("/d"));
  assertEquals(getAbsolutePath("/a"),server.getHomeDir());
  assertEquals(getAbsolutePath("/b"),server.getConfigDir());
  assertEquals(getAbsolutePath("/c"),server.getLogDir());
  assertEquals(getAbsolutePath("/d"),server.getTempDir());
  assertEquals("server",server.getName());
  assertEquals("server",server.getPrefix());
  assertEquals("server.name",server.getPrefixedName("name"));
  assertNull(server.getConfig());
  // Home dir only plus Configuration: conf/log/temp derive from home.
  server=new Server("server",TestDirHelper.getTestDir().getAbsolutePath(),new Configuration(false));
  assertEquals(TestDirHelper.getTestDir().getAbsolutePath(),server.getHomeDir());
  assertEquals(TestDirHelper.getTestDir() + "/conf",server.getConfigDir());
  assertEquals(TestDirHelper.getTestDir() + "/log",server.getLogDir());
  assertEquals(TestDirHelper.getTestDir() + "/temp",server.getTempDir());
  assertEquals("server",server.getName());
  assertEquals("server",server.getPrefix());
  assertEquals("server.name",server.getPrefixedName("name"));
  assertNotNull(server.getConfig());
  // Home dir only, no Configuration.
  server=new Server("server",TestDirHelper.getTestDir().getAbsolutePath());
  assertEquals(TestDirHelper.getTestDir().getAbsolutePath(),server.getHomeDir());
  assertEquals(TestDirHelper.getTestDir() + "/conf",server.getConfigDir());
  assertEquals(TestDirHelper.getTestDir() + "/log",server.getLogDir());
  assertEquals(TestDirHelper.getTestDir() + "/temp",server.getTempDir());
  assertEquals("server",server.getName());
  assertEquals("server",server.getPrefix());
  assertEquals("server.name",server.getPrefixedName("name"));
  assertNull(server.getConfig());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Exercises Server service management end to end: registration order drives
// init/postInit order, destroy runs in reverse order, a failing service rolls
// back the already-initialized ones, "server.services.ext" overrides an
// implementation, and setService() replaces/adds services at runtime. The
// shared ORDER list records every lifecycle callback as "<svc>.<event>".
@Test @TestDir public void services() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf;
Server server;
// Scenario 1: no services configured -> no lifecycle callbacks at all.
ORDER.clear();
conf=new Configuration(false);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
assertEquals(ORDER.size(),0);
// Scenario 2: two services init in registration order, destroy in reverse.
ORDER.clear();
String services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
assertEquals(server.get(MyService1.class).getInterface(),MyService1.class);
assertEquals(server.get(MyService3.class).getInterface(),MyService3.class);
assertEquals(ORDER.size(),4);
assertEquals(ORDER.get(0),"s1.init");
assertEquals(ORDER.get(1),"s3.init");
assertEquals(ORDER.get(2),"s1.postInit");
assertEquals(ORDER.get(3),"s3.postInit");
server.destroy();
assertEquals(ORDER.size(),6);
assertEquals(ORDER.get(4),"s3.destroy");
assertEquals(ORDER.get(5),"s1.destroy");
// Scenario 3: a service that fails during init aborts server.init() and the
// already-initialized services (s1) are destroyed; s3 is never started.
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService2.class.getName(),MyService3.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
try {
server.init();
fail();
}
catch ( ServerException ex) {
assertEquals(MyService2.class,ex.getError().getClass());
}
catch ( Exception ex) {
fail();
}
assertEquals(ORDER.size(),3);
assertEquals(ORDER.get(0),"s1.init");
assertEquals(ORDER.get(1),"s2.init");
assertEquals(ORDER.get(2),"s1.destroy");
// Scenario 4: same lifecycle shape with a different second service (s5).
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService5.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
assertEquals(ORDER.size(),4);
assertEquals(ORDER.get(0),"s1.init");
assertEquals(ORDER.get(1),"s5.init");
assertEquals(ORDER.get(2),"s1.postInit");
assertEquals(ORDER.get(3),"s5.postInit");
server.destroy();
assertEquals(ORDER.size(),6);
assertEquals(ORDER.get(4),"s5.destroy");
assertEquals(ORDER.get(5),"s1.destroy");
// Scenario 5: "server.services.ext" substitutes MyService1a for MyService1
// at configuration time; lifecycle events come from s1a instead of s1.
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName()));
String servicesExt=StringUtils.join(",",Arrays.asList(MyService1a.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
conf.set("server.services.ext",servicesExt);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
assertEquals(server.get(MyService1.class).getClass(),MyService1a.class);
assertEquals(ORDER.size(),4);
assertEquals(ORDER.get(0),"s1a.init");
assertEquals(ORDER.get(1),"s3.init");
assertEquals(ORDER.get(2),"s1a.postInit");
assertEquals(ORDER.get(3),"s3.postInit");
server.destroy();
assertEquals(ORDER.size(),6);
assertEquals(ORDER.get(4),"s3.destroy");
assertEquals(ORDER.get(5),"s1a.destroy");
// Scenario 6: setService() at runtime destroys the old implementation and
// initializes the replacement.
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
server.setService(MyService1a.class);
assertEquals(ORDER.size(),6);
assertEquals(ORDER.get(4),"s1.destroy");
assertEquals(ORDER.get(5),"s1a.init");
assertEquals(server.get(MyService1.class).getClass(),MyService1a.class);
server.destroy();
assertEquals(ORDER.size(),8);
assertEquals(ORDER.get(6),"s3.destroy");
assertEquals(ORDER.get(7),"s1a.destroy");
// Scenario 7: setService() with a brand-new service adds it (nothing is
// destroyed) and it participates in the final teardown.
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
server.setService(MyService5.class);
assertEquals(ORDER.size(),5);
assertEquals(ORDER.get(4),"s5.init");
assertEquals(server.get(MyService5.class).getClass(),MyService5.class);
server.destroy();
assertEquals(ORDER.size(),8);
assertEquals(ORDER.get(5),"s5.destroy");
assertEquals(ORDER.get(6),"s3.destroy");
assertEquals(ORDER.get(7),"s1.destroy");
// Scenario 8: a service that fails in setService() (error S09) tears the
// whole server down.
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
try {
server.setService(MyService7.class);
fail();
}
catch ( ServerException ex) {
assertEquals(ServerException.ERROR.S09,ex.getError());
}
catch ( Exception ex) {
fail();
}
assertEquals(ORDER.size(),6);
assertEquals(ORDER.get(4),"s3.destroy");
assertEquals(ORDER.get(5),"s1.destroy");
// Scenario 9: two unrelated services coexist, each retrievable by its own
// interface.
ORDER.clear();
services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService6.class.getName()));
conf=new Configuration(false);
conf.set("server.services",services);
server=new Server("server",dir,dir,dir,dir,conf);
server.init();
assertEquals(server.get(MyService1.class).getInterface(),MyService1.class);
assertEquals(server.get(MyService6.class).getInterface(),MyService6.class);
server.destroy();
}
InternalCallVerifier EqualityVerifier
/**
 * With no site file or system property present, the server configuration must
 * come from the bundled default resource.
 */
@Test @TestDir public void loadingDefaultConfig() throws Exception {
  String dir=TestDirHelper.getTestDir().getAbsolutePath();
  Server server=new Server("testserver",dir,dir,dir,dir);
  server.init();
  // Expected value first per JUnit convention.
  assertEquals("default",server.getConfig().get("testserver.a"));
}
InternalCallVerifier EqualityVerifier
// Verifies that a JVM system property overrides the value from the site
// configuration file for the same key.
@Test @TestDir public void loadingSysPropConfig() throws Exception {
try {
System.setProperty("testserver.a","sysprop");
String dir=TestDirHelper.getTestDir().getAbsolutePath();
File configFile=new File(dir,"testserver-site.xml");
Writer w=new FileWriter(configFile);
// NOTE(review): this payload looks like mangled XML (presumably once a full
// <configuration> document setting testserver.a=site) — confirm against the
// original test source.
w.write("testserver.a site ");
w.close();
Server server=new Server("testserver",dir,dir,dir,dir);
server.init();
// The system property must win over the site-file value.
assertEquals(server.getConfig().get("testserver.a"),"sysprop");
}
finally {
// Clean up so the property does not leak into other tests.
System.getProperties().remove("testserver.a");
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A server configured with a non-default startup status must come up in that
 * status (ADMIN) rather than NORMAL.
 */
@Test @TestDir public void startWithStatusNotNormal() throws Exception {
  Configuration conf=new Configuration(false);
  conf.set("server.startup.status","ADMIN");
  Server server=createServer(conf);
  server.init();
  // Expected value first per JUnit convention.
  assertEquals(Server.Status.ADMIN,server.getStatus());
  server.destroy();
}
InternalCallVerifier EqualityVerifier
// Verifies that a "<name>-site.xml" file in the config directory overrides
// the bundled default configuration.
@Test @TestDir public void loadingSiteConfig() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
File configFile=new File(dir,"testserver-site.xml");
Writer w=new FileWriter(configFile);
// NOTE(review): this payload looks like mangled XML (presumably once a full
// <configuration> document setting testserver.a=site) — confirm against the
// original test source.
w.write("testserver.a site ");
w.close();
Server server=new Server("testserver",dir,dir,dir,dir);
server.init();
// The site-file value must override the bundled default ("default").
assertEquals(server.getConfig().get("testserver.a"),"site");
}
UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
// Verifies that FileSystemAccessService hands out a working FileSystem and
// that, with a purge timeout of 0, the FileSystem is closed as soon as it is
// released — any further use must fail with IOException.
@Test @TestDir @TestHdfs public void createFileSystem() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName()));
Configuration hadoopConf=new Configuration(false);
// Point the service at the mini HDFS cluster provided by @TestHdfs.
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf=new Configuration(false);
conf.set("server.services",services);
// Purge timeout 0: released file systems are closed immediately.
conf.set("server.hadoop.filesystem.cache.purge.timeout","0");
Server server=new Server("server",dir,dir,dir,dir,conf);
server.init();
FileSystemAccess hadoop=server.get(FileSystemAccess.class);
FileSystem fs=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs);
fs.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs);
try {
fs.mkdirs(new Path("/tmp/foo"));
Assert.fail();
}
catch ( IOException ex) {
// expected: the underlying FileSystem was closed on release
}
catch ( Exception ex) {
Assert.fail();
}
server.destroy();
}
UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies the FileSystemAccessService cache: the same user gets the same
// cached FileSystem instance, a released-but-recently-used instance survives
// the purge, and an idle released instance is eventually purged (closed).
@Test @TestDir @TestHdfs public void fileSystemCache() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName()));
Configuration hadoopConf=new Configuration(false);
hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
createHadoopConf(hadoopConf);
Configuration conf=new Configuration(false);
conf.set("server.services",services);
// Purge every second, purge entries idle for more than a second.
conf.set("server.hadoop.filesystem.cache.purge.frequency","1");
conf.set("server.hadoop.filesystem.cache.purge.timeout","1");
Server server=new Server("server",dir,dir,dir,dir,conf);
try {
server.init();
FileSystemAccess hadoop=server.get(FileSystemAccess.class);
FileSystem fs1=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration());
Assert.assertNotNull(fs1);
fs1.mkdirs(new Path("/tmp/foo1"));
hadoop.releaseFileSystem(fs1);
// Released but still cached: the handle keeps working.
fs1.mkdirs(new Path("/tmp/foo2"));
// Same user: the cache must return the same underlying instance.
FileSystem fs2=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration());
Assert.assertEquals(fs1,fs2);
// Keep touching the instance across purge cycles: it must stay alive while
// a createFileSystem reference (fs2) is outstanding.
Thread.sleep(4 * 1000);
fs1.mkdirs(new Path("/tmp/foo2"));
Thread.sleep(4 * 1000);
fs2.mkdirs(new Path("/tmp/foo"));
hadoop.releaseFileSystem(fs2);
// Now fully released: after the purge timeout the instance must be closed.
Thread.sleep(4 * 1000);
try {
fs2.mkdirs(new Path("/tmp/foo"));
Assert.fail();
}
catch ( IOException ex) {
// expected: the cached FileSystem was purged and closed
}
catch ( Exception ex) {
Assert.fail();
}
}
finally {
server.destroy();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that FileSystemAccessService loads hdfs-site.xml from a custom
 * directory given via "server.hadoop.config.dir" instead of the server's
 * default config dir.
 */
@Test @TestDir public void serviceHadoopConfCustomDir() throws Exception {
  String dir=TestDirHelper.getTestDir().getAbsolutePath();
  String hadoopConfDir=new File(dir,"confx").getAbsolutePath();
  new File(hadoopConfDir).mkdirs();
  String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName()));
  Configuration conf=new Configuration(false);
  conf.set("server.services",services);
  conf.set("server.hadoop.config.dir",hadoopConfDir);
  // Write a marker property into the custom directory's hdfs-site.xml.
  File hdfsSite=new File(hadoopConfDir,"hdfs-site.xml");
  OutputStream os=new FileOutputStream(hdfsSite);
  Configuration hadoopConf=new Configuration(false);
  hadoopConf.set("foo","BAR");
  hadoopConf.writeXml(os);
  os.close();
  Server server=new Server("server",dir,dir,dir,dir,conf);
  server.init();
  FileSystemAccessService fsAccess=(FileSystemAccessService)server.get(FileSystemAccess.class);
  // Expected value first per JUnit convention.
  Assert.assertEquals("BAR",fsAccess.serviceHadoopConf.get("foo"));
  server.destroy();
}
InternalCallVerifier NullVerifier
/**
 * A server running FileSystemAccessService with default (simple) security
 * must initialize cleanly and expose the FileSystemAccess interface.
 */
@Test @TestDir public void simpleSecurity() throws Exception {
  String testDir=TestDirHelper.getTestDir().getAbsolutePath();
  String serviceList=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName()));
  Configuration serverConf=new Configuration(false);
  serverConf.set("server.services",serviceList);
  Server fsServer=new Server("server",testDir,testDir,testDir,testDir,serverConf);
  fsServer.init();
  Assert.assertNotNull(fsServer.get(FileSystemAccess.class));
  fsServer.destroy();
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that FileSystemAccessService loads hdfs-site.xml from the server's
 * default config directory (the "foo"="FOO" marker set up by the test dir).
 */
@Test @TestDir public void serviceHadoopConf() throws Exception {
  String dir=TestDirHelper.getTestDir().getAbsolutePath();
  String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName()));
  Configuration conf=new Configuration(false);
  conf.set("server.services",services);
  Server server=new Server("server",dir,dir,dir,dir,conf);
  server.init();
  FileSystemAccessService fsAccess=(FileSystemAccessService)server.get(FileSystemAccess.class);
  // Expected value first per JUnit convention.
  Assert.assertEquals("FOO",fsAccess.serviceHadoopConf.get("foo"));
  server.destroy();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises InstrumentationService.Sampler: the rate is the rolling average
 * over the last {@code size} sampled values, and the JSON rendering exposes
 * exactly the rate ("sampler") and the buffer size ("size").
 */
@Test public void sampler() throws Exception {
  final long value[]=new long[1];
  Instrumentation.Variable var=new Instrumentation.Variable(){
    @Override public Long getValue(){
      return value[0];
    }
  }
  ;
  InstrumentationService.Sampler sampler=new InstrumentationService.Sampler();
  sampler.init(4,var);
  // Expected value first per JUnit convention (also for the delta overloads).
  assertEquals(0f,sampler.getRate(),0.0001);
  sampler.sample();
  assertEquals(0f,sampler.getRate(),0.0001);
  value[0]=1;
  sampler.sample();
  assertEquals((0d + 1) / 2,sampler.getRate(),0.0001);
  value[0]=2;
  sampler.sample();
  assertEquals((0d + 1 + 2) / 3,sampler.getRate(),0.0001);
  value[0]=3;
  sampler.sample();
  assertEquals((0d + 1 + 2+ 3) / 4,sampler.getRate(),0.0001);
  // Buffer full (size 4): the oldest sample (0) is evicted by the new one.
  value[0]=4;
  sampler.sample();
  assertEquals((4d + 1 + 2+ 3) / 4,sampler.getRate(),0.0001);
  // JSON via toJSONString().
  JSONObject json=(JSONObject)new JSONParser().parse(sampler.toJSONString());
  assertEquals(2,json.size());
  assertEquals(sampler.getRate(),json.get("sampler"));
  assertEquals(4L,json.get("size"));
  // JSON via writeJSONString(Writer) must match.
  StringWriter writer=new StringWriter();
  sampler.writeJSONString(writer);
  writer.close();
  json=(JSONObject)new JSONParser().parse(writer.toString());
  assertEquals(2,json.size());
  assertEquals(sampler.getRate(),json.get("sampler"));
  assertEquals(4L,json.get("size"));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Exercises InstrumentationService.Cron: start()/stop() accumulate "own" time
// across laps, end() fixes the total elapsed time, and start()/stop() after
// end() must throw IllegalStateException. Timing assertions use a 20ms slack.
@Test public void cron(){
InstrumentationService.Cron cron=new InstrumentationService.Cron();
// Freshly created: all counters zero.
assertEquals(cron.start,0);
assertEquals(cron.lapStart,0);
assertEquals(cron.own,0);
assertEquals(cron.total,0);
long begin=Time.now();
// start() is fluent (returns this) and idempotent while running.
assertEquals(cron.start(),cron);
assertEquals(cron.start(),cron);
assertEquals(cron.start,begin,20);
assertEquals(cron.start,cron.lapStart);
sleep(100);
// stop() is fluent too; it adds the lap to "own" and clears lapStart.
assertEquals(cron.stop(),cron);
long end=Time.now();
long delta=end - begin;
assertEquals(cron.own,delta,20);
assertEquals(cron.total,0);
assertEquals(cron.lapStart,0);
// Second lap: "start" keeps the first start time, lapStart is the new lap.
sleep(100);
long reStart=Time.now();
cron.start();
assertEquals(cron.start,begin,20);
assertEquals(cron.lapStart,reStart,20);
sleep(100);
cron.stop();
long reEnd=Time.now();
delta+=reEnd - reStart;
assertEquals(cron.own,delta,20);
assertEquals(cron.total,0);
assertEquals(cron.lapStart,0);
// end() computes total = last stop - first start.
cron.end();
assertEquals(cron.total,reEnd - begin,20);
// After end() the cron is sealed: start() and stop() must throw.
try {
cron.start();
fail();
}
catch ( IllegalStateException ex) {
// expected: cron already ended
}
catch ( Exception ex) {
fail();
}
try {
cron.stop();
fail();
}
catch ( IllegalStateException ex) {
// expected: cron already ended
}
catch ( Exception ex) {
fail();
}
}
InternalCallVerifier NullVerifier
// End-to-end check of InstrumentationService: counters, timers (crons),
// variables and samplers all appear in the snapshot alongside the standard
// os-env / sys-props / jvm sections, and the snapshot is JSON-serializable.
// NOTE(review): the generic parameters in the Map casts below appear to have
// been stripped by formatting (e.g. "Map>") — confirm against the original
// source; the raw shape suggests Map<String,Map<String,?>>.
@Test @TestDir @SuppressWarnings("unchecked") public void service() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName()));
Configuration conf=new Configuration(false);
conf.set("server.services",services);
Server server=new Server("server",dir,dir,dir,dir,conf);
server.init();
Instrumentation instrumentation=server.get(Instrumentation.class);
assertNotNull(instrumentation);
// Counters: two increments on "c", one on "c1", all under group "g".
instrumentation.incr("g","c",1);
instrumentation.incr("g","c",2);
instrumentation.incr("g","c1",2);
// Timers: two completed crons recorded under g/t.
Instrumentation.Cron cron=instrumentation.createCron();
cron.start();
sleep(100);
cron.stop();
instrumentation.addCron("g","t",cron);
cron=instrumentation.createCron();
cron.start();
sleep(200);
cron.stop();
instrumentation.addCron("g","t",cron);
// Variable g/v returning a constant string.
Instrumentation.Variable var=new Instrumentation.Variable(){
@Override public String getValue(){
return "foo";
}
}
;
instrumentation.addVariable("g","v",var);
// Sampler g/s over a constant-valued variable.
Instrumentation.Variable varToSample=new Instrumentation.Variable(){
@Override public Long getValue(){
return 1L;
}
}
;
instrumentation.addSampler("g","s",10,varToSample);
// The snapshot must contain every standard section plus the data above.
Map snapshot=instrumentation.getSnapshot();
assertNotNull(snapshot.get("os-env"));
assertNotNull(snapshot.get("sys-props"));
assertNotNull(snapshot.get("jvm"));
assertNotNull(snapshot.get("counters"));
assertNotNull(snapshot.get("timers"));
assertNotNull(snapshot.get("variables"));
assertNotNull(snapshot.get("samplers"));
assertNotNull(((Map)snapshot.get("os-env")).get("PATH"));
assertNotNull(((Map)snapshot.get("sys-props")).get("java.version"));
assertNotNull(((Map)snapshot.get("jvm")).get("free.memory"));
assertNotNull(((Map)snapshot.get("jvm")).get("max.memory"));
assertNotNull(((Map)snapshot.get("jvm")).get("total.memory"));
assertNotNull(((Map>)snapshot.get("counters")).get("g"));
assertNotNull(((Map>)snapshot.get("timers")).get("g"));
assertNotNull(((Map>)snapshot.get("variables")).get("g"));
assertNotNull(((Map>)snapshot.get("samplers")).get("g"));
assertNotNull(((Map>)snapshot.get("counters")).get("g").get("c"));
assertNotNull(((Map>)snapshot.get("counters")).get("g").get("c1"));
assertNotNull(((Map>)snapshot.get("timers")).get("g").get("t"));
assertNotNull(((Map>)snapshot.get("variables")).get("g").get("v"));
assertNotNull(((Map>)snapshot.get("samplers")).get("g").get("s"));
// The whole snapshot must serialize to JSON without errors.
StringWriter writer=new StringWriter();
JSONObject.writeJSONString(snapshot,writer);
writer.close();
server.destroy();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises InstrumentationService.Timer with a rolling window of size 2:
// addCron() records last-own/last-total and folds them into the window
// averages; the JSON rendering exposes the four values. Each phase builds a
// two-lap cron and tracks the expected own/total deltas by hand; timing
// assertions use a 20ms slack.
@Test public void timer() throws Exception {
InstrumentationService.Timer timer=new InstrumentationService.Timer(2);
InstrumentationService.Cron cron=new InstrumentationService.Cron();
long ownStart;
long ownEnd;
long totalStart;
long totalEnd;
long ownDelta;
long totalDelta;
long avgTotal;
long avgOwn;
// Phase 1: two 100ms laps with a 100ms gap; window holds one sample, so
// the averages equal the sample itself.
cron.start();
ownStart=Time.now();
totalStart=ownStart;
ownDelta=0;
sleep(100);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
sleep(100);
cron.start();
ownStart=Time.now();
sleep(100);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
totalEnd=ownEnd;
totalDelta=totalEnd - totalStart;
avgTotal=totalDelta;
avgOwn=ownDelta;
timer.addCron(cron);
long[] values=timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20);
// Phase 2: fresh cron with 200ms laps; window now holds two samples, so
// the averages are the mean of phases 1 and 2.
cron=new InstrumentationService.Cron();
cron.start();
ownStart=Time.now();
totalStart=ownStart;
ownDelta=0;
sleep(200);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
sleep(200);
cron.start();
ownStart=Time.now();
sleep(200);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
totalEnd=ownEnd;
totalDelta=totalEnd - totalStart;
avgTotal=(avgTotal * 1 + totalDelta) / 2;
avgOwn=(avgOwn * 1 + ownDelta) / 2;
timer.addCron(cron);
values=timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20);
// Phase 3: window size is 2, so the phase-1 sample drops out — averages are
// now over phases 2 and 3 only.
avgTotal=totalDelta;
avgOwn=ownDelta;
cron=new InstrumentationService.Cron();
cron.start();
ownStart=Time.now();
totalStart=ownStart;
ownDelta=0;
sleep(300);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
sleep(300);
cron.start();
ownStart=Time.now();
sleep(300);
cron.stop();
ownEnd=Time.now();
ownDelta+=ownEnd - ownStart;
totalEnd=ownEnd;
totalDelta=totalEnd - totalStart;
avgTotal=(avgTotal * 1 + totalDelta) / 2;
avgOwn=(avgOwn * 1 + ownDelta) / 2;
cron.stop();
timer.addCron(cron);
values=timer.getValues();
assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20);
assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20);
assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20);
assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20);
// JSON via toJSONString() exposes exactly the four timer values.
JSONObject json=(JSONObject)new JSONParser().parse(timer.toJSONString());
assertEquals(json.size(),4);
assertEquals(json.get("lastTotal"),values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"),values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"),values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"),values[InstrumentationService.Timer.AVG_OWN]);
// JSON via writeJSONString(Writer) must match.
StringWriter writer=new StringWriter();
timer.writeJSONString(writer);
writer.close();
json=(JSONObject)new JSONParser().parse(writer.toString());
assertEquals(json.size(),4);
assertEquals(json.get("lastTotal"),values[InstrumentationService.Timer.LAST_TOTAL]);
assertEquals(json.get("lastOwn"),values[InstrumentationService.Timer.LAST_OWN]);
assertEquals(json.get("avgTotal"),values[InstrumentationService.Timer.AVG_TOTAL]);
assertEquals(json.get("avgOwn"),values[InstrumentationService.Timer.AVG_OWN]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies InstrumentationService.VariableHolder JSON rendering: both
 * toJSONString() and writeJSONString(Writer) emit a single-entry object
 * {"value": <variable value>}.
 */
@Test public void variableHolder() throws Exception {
  InstrumentationService.VariableHolder variableHolder=new InstrumentationService.VariableHolder();
  variableHolder.var=new Instrumentation.Variable(){
    @Override public String getValue(){
      return "foo";
    }
  }
  ;
  JSONObject json=(JSONObject)new JSONParser().parse(variableHolder.toJSONString());
  // Expected value first per JUnit convention.
  assertEquals(1,json.size());
  assertEquals("foo",json.get("value"));
  StringWriter writer=new StringWriter();
  variableHolder.writeJSONString(writer);
  writer.close();
  json=(JSONObject)new JSONParser().parse(writer.toString());
  assertEquals(1,json.size());
  assertEquals("foo",json.get("value"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies that a registered sampler is polled periodically by the
// SchedulerService: after ~2s the sampled variable has been read at least
// once and the sampler's rate is positive.
// NOTE(review): the generic parameters in the Map types below appear to have
// been stripped by formatting ("Map>") — confirm against the original source.
@Test @TestDir @SuppressWarnings("unchecked") public void sampling() throws Exception {
String dir=TestDirHelper.getTestDir().getAbsolutePath();
String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName()));
Configuration conf=new Configuration(false);
conf.set("server.services",services);
Server server=new Server("server",dir,dir,dir,dir,conf);
server.init();
Instrumentation instrumentation=server.get(Instrumentation.class);
// The variable counts how often the scheduler polls it.
final AtomicInteger count=new AtomicInteger();
Instrumentation.Variable varToSample=new Instrumentation.Variable(){
@Override public Long getValue(){
return (long)count.incrementAndGet();
}
}
;
instrumentation.addSampler("g","s",10,varToSample);
// Give the scheduler time to run at least one sampling pass.
sleep(2000);
int i=count.get();
assertTrue(i > 0);
Map> snapshot=instrumentation.getSnapshot();
Map> samplers=(Map>)snapshot.get("samplers");
InstrumentationService.Sampler sampler=(InstrumentationService.Sampler)samplers.get("g").get("s");
assertTrue(sampler.getRate() > 0);
server.destroy();
}
InternalCallVerifier NullVerifier
/**
 * A server configured with SchedulerService must expose the Scheduler
 * interface after init().
 */
@Test @TestDir public void service() throws Exception {
  String testDir=TestDirHelper.getTestDir().getAbsolutePath();
  Configuration serverConf=new Configuration(false);
  serverConf.set("server.services",StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName())));
  Server schedulerServer=new Server("server",testDir,testDir,testDir,testDir,serverConf);
  schedulerServer.init();
  assertNotNull(schedulerServer.get(Scheduler.class));
  schedulerServer.destroy();
}
InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier
/**
 * A server configured with GroupsService must expose the Groups interface and
 * resolve at least one group for the current OS user.
 */
@Test @TestDir public void service() throws Exception {
  String dir=TestDirHelper.getTestDir().getAbsolutePath();
  Configuration conf=new Configuration(false);
  conf.set("server.services",StringUtils.join(",",Arrays.asList(GroupsService.class.getName())));
  Server server=new Server("server",dir,dir,dir,dir,conf);
  server.init();
  Groups groups=server.get(Groups.class);
  assertNotNull(groups);
  List g=groups.getGroups(System.getProperty("user.name"));
  // Unexpected value first per JUnit convention. NOTE(review): assertNotSame
  // compares references; it only works here because small boxed Integers are
  // interned — assertNotEquals would state the intent directly.
  assertNotSame(0,g.size());
  server.destroy();
}
InternalCallVerifier EqualityVerifier
/**
 * resolveAuthority() must build the socket address from the configured
 * http.hostname and http.port system properties. Properties are removed in a
 * finally block so they cannot leak into other tests (same pattern as
 * loadingSysPropConfig).
 */
@Test @TestDir public void testResolveAuthority() throws Exception {
  String dir=TestDirHelper.getTestDir().getAbsolutePath();
  try {
    System.setProperty("TestServerWebApp3.home.dir",dir);
    System.setProperty("TestServerWebApp3.config.dir",dir);
    System.setProperty("TestServerWebApp3.log.dir",dir);
    System.setProperty("TestServerWebApp3.temp.dir",dir);
    System.setProperty("testserverwebapp3.http.hostname","localhost");
    System.setProperty("testserverwebapp3.http.port","14000");
    ServerWebApp server=new ServerWebApp("TestServerWebApp3"){
    }
    ;
    InetSocketAddress address=server.resolveAuthority();
    Assert.assertEquals("localhost",address.getHostName());
    Assert.assertEquals(14000,address.getPort());
  }
  finally {
    // Clean up every property set above.
    System.getProperties().remove("TestServerWebApp3.home.dir");
    System.getProperties().remove("TestServerWebApp3.config.dir");
    System.getProperties().remove("TestServerWebApp3.log.dir");
    System.getProperties().remove("TestServerWebApp3.temp.dir");
    System.getProperties().remove("testserverwebapp3.http.hostname");
    System.getProperties().remove("testserverwebapp3.http.port");
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that servlet-context callbacks drive the underlying Server
 * lifecycle: UNDEF -> NORMAL (contextInitialized) -> SHUTDOWN
 * (contextDestroyed).
 */
@Test @TestDir public void lifecycle() throws Exception {
  String dir=TestDirHelper.getTestDir().getAbsolutePath();
  System.setProperty("TestServerWebApp1.home.dir",dir);
  System.setProperty("TestServerWebApp1.config.dir",dir);
  System.setProperty("TestServerWebApp1.log.dir",dir);
  System.setProperty("TestServerWebApp1.temp.dir",dir);
  ServerWebApp server=new ServerWebApp("TestServerWebApp1"){
  }
  ;
  // Expected value first per JUnit convention.
  assertEquals(Server.Status.UNDEF,server.getStatus());
  server.contextInitialized(null);
  assertEquals(Server.Status.NORMAL,server.getStatus());
  server.contextDestroyed(null);
  assertEquals(Server.Status.SHUTDOWN,server.getStatus());
}
InternalCallVerifier EqualityVerifier
@Test public void testVarResolutionAndSysProps(){
  // getRaw() must return the unresolved value, get() must resolve ${...}
  // against other properties and system properties; unknown vars stay as-is.
  String userName=System.getProperty("user.name");
  Configuration conf=new Configuration(false);
  conf.set("a","A");
  conf.set("b","${a}");
  conf.set("c","${user.name}");
  conf.set("d","${aaa}");
  // Expected value first per JUnit convention (fixed reversed arguments).
  assertEquals("A",conf.getRaw("a"));
  assertEquals("${a}",conf.getRaw("b"));
  assertEquals("${user.name}",conf.getRaw("c"));
  assertEquals("A",conf.get("a"));
  assertEquals("A",conf.get("b"));
  assertEquals(userName,conf.get("c"));
  // Unresolvable variable is returned verbatim.
  assertEquals("${aaa}",conf.get("d"));
  // An explicit property shadows the system property of the same name.
  conf.set("user.name","foo");
  assertEquals("foo",conf.get("user.name"));
}
InternalCallVerifier EqualityVerifier
@Test public void constructors() throws Exception {
  // A freshly constructed configuration is empty.
  Configuration conf=new Configuration(false);
  assertEquals(0,conf.size());
  // NOTE(review): getBytes() uses the platform default charset — content is
  // ASCII-only here so this is safe, but an explicit charset would be better.
  byte[] bytes="a A ".getBytes();
  InputStream is=new ByteArrayInputStream(bytes);
  conf=new Configuration(false);
  // Loading the stream is expected to produce exactly one property a=A.
  ConfigurationUtils.load(conf,is);
  assertEquals(1,conf.size());
  assertEquals("A",conf.get("a"));
}
InternalCallVerifier EqualityVerifier
@Test public void copy() throws Exception {
  // Source holds parameters 1 and 2; target holds 2 and 3 before the copy.
  Configuration source=new Configuration(false);
  source.set("testParameter1","valueFromSource");
  source.set("testParameter2","valueFromSource");
  Configuration target=new Configuration(false);
  target.set("testParameter2","valueFromTarget");
  target.set("testParameter3","valueFromTarget");
  // copy() overwrites overlapping keys with source values, adds missing
  // keys, and leaves target-only keys untouched.
  ConfigurationUtils.copy(source,target);
  assertEquals("valueFromSource",target.get("testParameter1"));
  assertEquals("valueFromSource",target.get("testParameter2"));
  assertEquals("valueFromTarget",target.get("testParameter3"));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void injectDefaults() throws Exception {
  // Source holds parameters 1 and 2; target holds 2 and 3 before injection.
  Configuration source=new Configuration(false);
  source.set("testParameter1","valueFromSource");
  source.set("testParameter2","valueFromSource");
  Configuration target=new Configuration(false);
  target.set("testParameter2","originalValueFromTarget");
  target.set("testParameter3","originalValueFromTarget");
  // injectDefaults() only fills in keys the target does not already have;
  // existing target values win, unlike copy().
  ConfigurationUtils.injectDefaults(source,target);
  assertEquals("valueFromSource",target.get("testParameter1"));
  assertEquals("originalValueFromTarget",target.get("testParameter2"));
  assertEquals("originalValueFromTarget",target.get("testParameter3"));
  // The source configuration must be left unmodified.
  assertEquals("valueFromSource",source.get("testParameter1"));
  assertEquals("valueFromSource",source.get("testParameter2"));
  assertNull(source.get("testParameter3"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void resolve(){
  // resolve() must return a configuration whose raw values have all
  // ${...} references substituted.
  Configuration conf=new Configuration(false);
  conf.set("a","A");
  conf.set("b","${a}");
  // Expected value first per JUnit convention (fixed reversed arguments).
  assertEquals("A",conf.getRaw("a"));
  assertEquals("${a}",conf.getRaw("b"));
  conf=ConfigurationUtils.resolve(conf);
  assertEquals("A",conf.getRaw("a"));
  assertEquals("A",conf.getRaw("b"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void test() throws Exception {
  // JSONMapProvider must accept Map payloads only, report unknown size,
  // and serialize a map as compact JSON.
  JSONMapProvider p=new JSONMapProvider();
  assertTrue(p.isWriteable(Map.class,null,null,null));
  assertFalse(p.isWriteable(this.getClass(),null,null,null));
  // Expected value first per JUnit convention (fixed reversed arguments).
  assertEquals(-1,p.getSize(null,null,null,null,null));
  ByteArrayOutputStream baos=new ByteArrayOutputStream();
  JSONObject json=new JSONObject();
  json.put("a","A");
  p.writeTo(json,JSONObject.class,null,null,null,null,baos);
  baos.close();
  assertEquals("{\"a\":\"A\"}",new String(baos.toByteArray()).trim());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void test() throws Exception {
  // JSONProvider must accept JSONObject payloads only, report unknown size,
  // and serialize an object as compact JSON.
  JSONProvider p=new JSONProvider();
  assertTrue(p.isWriteable(JSONObject.class,null,null,null));
  assertFalse(p.isWriteable(this.getClass(),null,null,null));
  // Expected value first per JUnit convention (fixed reversed arguments).
  assertEquals(-1,p.getSize(null,null,null,null,null));
  ByteArrayOutputStream baos=new ByteArrayOutputStream();
  JSONObject json=new JSONObject();
  json.put("a","A");
  p.writeTo(json,JSONObject.class,null,null,null,null,baos);
  baos.close();
  assertEquals("{\"a\":\"A\"}",new String(baos.toByteArray()).trim());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testNestedException() throws Throwable {
  // Serialize a nested exception through Log4Json and verify the resulting
  // JSON carries level, name, timestamp, exception class, stack and date.
  Exception e=new NoRouteToHostException("that box caught fire 3 years ago");
  Exception ioe=new IOException("Datacenter problems",e);
  ThrowableInformation ti=new ThrowableInformation(ioe);
  Log4Json l4j=new Log4Json();
  long timeStamp=Time.now();
  // Message deliberately contains quotes, a newline and braces to exercise
  // JSON escaping.
  String outcome=l4j.toJson(new StringWriter(),"testNestedException",timeStamp,"INFO","quoted\"","new line\n and {}",ti).toString();
  println("testNestedException",outcome);
  ContainerNode rootNode=Log4Json.parse(outcome);
  assertEntryEquals(rootNode,Log4Json.LEVEL,"INFO");
  assertEntryEquals(rootNode,Log4Json.NAME,"testNestedException");
  assertEntryEquals(rootNode,Log4Json.TIME,timeStamp);
  assertEntryEquals(rootNode,Log4Json.EXCEPTION_CLASS,ioe.getClass().getName());
  JsonNode node=assertNodeContains(rootNode,Log4Json.STACK);
  assertTrue("Not an array: " + node,node.isArray());
  node=assertNodeContains(rootNode,Log4Json.DATE);
  assertTrue("Not a string: " + node,node.isTextual());
  // The date text must look like an ISO-style date-time.
  String dateText=node.getTextValue();
  assertTrue("No '-' in " + dateText,dateText.contains("-"));
  // Fixed: message previously claimed "No '-'" while checking for ':'.
  assertTrue("No ':' in " + dateText,dateText.contains(":"));
}
InternalCallVerifier EqualityVerifier
@Test public void testCleanupTaskReportsWithNullJob() throws Exception {
  // A cluster that knows no job with the requested id.
  TestJobClient jobClient=new TestJobClient(new JobConf());
  Cluster cluster=mock(Cluster.class);
  jobClient.setCluster(cluster);
  JobID jobId=new JobID("test",0);
  when(cluster.getJob(jobId)).thenReturn(null);
  // Unknown job must yield an empty report array rather than an NPE.
  TaskReport[] reports=jobClient.getCleanupTaskReports(jobId);
  assertEquals(0,reports.length);
  verify(cluster).getJob(jobId);
}
InternalCallVerifier EqualityVerifier
@Test public void testMapTaskReportsWithNullJob() throws Exception {
  // A cluster that knows no job with the requested id.
  TestJobClient jobClient=new TestJobClient(new JobConf());
  Cluster cluster=mock(Cluster.class);
  jobClient.setCluster(cluster);
  JobID jobId=new JobID("test",0);
  when(cluster.getJob(jobId)).thenReturn(null);
  // Unknown job must yield an empty report array rather than an NPE.
  TaskReport[] reports=jobClient.getMapTaskReports(jobId);
  assertEquals(0,reports.length);
  verify(cluster).getJob(jobId);
}
InternalCallVerifier EqualityVerifier
@Test public void testSetupTaskReportsWithNullJob() throws Exception {
  // A cluster that knows no job with the requested id.
  TestJobClient jobClient=new TestJobClient(new JobConf());
  Cluster cluster=mock(Cluster.class);
  jobClient.setCluster(cluster);
  JobID jobId=new JobID("test",0);
  when(cluster.getJob(jobId)).thenReturn(null);
  // Unknown job must yield an empty report array rather than an NPE.
  TaskReport[] reports=jobClient.getSetupTaskReports(jobId);
  assertEquals(0,reports.length);
  verify(cluster).getJob(jobId);
}
InternalCallVerifier EqualityVerifier
@Test public void testReduceTaskReportsWithNullJob() throws Exception {
  // A cluster that knows no job with the requested id.
  TestJobClient jobClient=new TestJobClient(new JobConf());
  Cluster cluster=mock(Cluster.class);
  jobClient.setCluster(cluster);
  JobID jobId=new JobID("test",0);
  when(cluster.getJob(jobId)).thenReturn(null);
  // Unknown job must yield an empty report array rather than an NPE.
  TaskReport[] reports=jobClient.getReduceTaskReports(jobId);
  assertEquals(0,reports.length);
  verify(cluster).getJob(jobId);
}
InternalCallVerifier NullVerifier
@Test public void testGetJobWithUnknownJob() throws Exception {
  // getJob() for an id the cluster does not know must return null.
  TestJobClient jobClient=new TestJobClient(new JobConf());
  Cluster cluster=mock(Cluster.class);
  jobClient.setCluster(cluster);
  JobID unknownId=new JobID("unknown",0);
  when(cluster.getJob(unknownId)).thenReturn(null);
  assertNull(jobClient.getJob(unknownId));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRedirect() throws Exception {
// End-to-end client redirection test: RM, AM and history server are local
// mock services; the flags amContact/hsContact/amRestarting are shared
// fields the services flip/read to observe which endpoint the client hit.
Configuration conf=new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
conf.set(YarnConfiguration.RM_ADDRESS,RMADDRESS);
conf.set(JHAdminConfig.MR_HISTORY_ADDRESS,HSHOSTADDRESS);
RMService rmService=new RMService("test");
rmService.init(conf);
rmService.start();
AMService amService=new AMService();
amService.init(conf);
amService.start(conf);
HistoryService historyService=new HistoryService();
historyService.init(conf);
historyService.start(conf);
LOG.info("services started");
// While the AM is up, counters must come from the AM (amContact set true).
Cluster cluster=new Cluster(conf);
org.apache.hadoop.mapreduce.JobID jobID=new org.apache.hadoop.mapred.JobID("201103121733",1);
org.apache.hadoop.mapreduce.Counters counters=cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(amContact);
LOG.info("Sleeping for 5 seconds before stop for" + " the client socket to not get EOF immediately..");
Thread.sleep(5000);
// Stop the AM and mark it as restarting: the client should see empty
// counters while the AM is unavailable.
amService.stop();
LOG.info("Sleeping for 5 seconds after stop for" + " the server to exit cleanly..");
Thread.sleep(5000);
amRestarting=true;
counters=cluster.getJob(jobID).getCounters();
Assert.assertEquals(0,counters.countCounters());
Job job=cluster.getJob(jobID);
org.apache.hadoop.mapreduce.TaskID taskId=new org.apache.hadoop.mapreduce.TaskID(jobID,TaskType.MAP,0);
TaskAttemptID tId=new TaskAttemptID(taskId,0);
// The following calls are exercised only for "does not throw" while the
// AM is down; their return values are deliberately ignored.
job.killJob();
job.killTask(tId);
job.failTask(tId);
job.getTaskCompletionEvents(0,100);
job.getStatus();
job.getTaskDiagnostics(tId);
job.getTaskReports(TaskType.MAP);
job.getTrackingURL();
// Bring up a fresh AM: the client must reconnect and contact it again.
amRestarting=false;
amService=new AMService();
amService.init(conf);
amService.start(conf);
amContact=false;
counters=cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(amContact);
// After the AM is stopped for good, the client must be redirected to the
// history server (hsContact set true).
amService.stop();
counters=cluster.getJob(jobID).getCounters();
validateCounters(counters);
Assert.assertTrue(hsContact);
rmService.stop();
historyService.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testJobReportFromHistoryServer() throws Exception {
  // The RM no longer knows the application; the history server holds the
  // archived job report.
  MRClientProtocol historyProxy=mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer());
  ResourceMgrDelegate resourceManager=mock(ResourceMgrDelegate.class);
  when(resourceManager.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null);
  ClientServiceDelegate delegate=getClientServiceDelegate(historyProxy,resourceManager);
  // The delegate must fall through to the history server's report.
  JobStatus status=delegate.getJobStatus(oldJobId);
  Assert.assertNotNull(status);
  Assert.assertEquals("TestJobFilePath",status.getJobFile());
  Assert.assertEquals("http://TestTrackingUrl",status.getTrackingUrl());
  Assert.assertEquals(1.0f,status.getMapProgress(),0.0f);
  Assert.assertEquals(1.0f,status.getReduceProgress(),0.0f);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testRetriesOnAMConnectionFailures() throws Exception {
// Only meaningful when the client can reach the AM directly.
if (!isAMReachableFromClient) {
return;
}
// RM reports the app as RUNNING so the delegate will talk to the AM.
ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class);
when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(getRunningApplicationReport("am1",78));
// AM proxy fails four times, then succeeds on the fifth attempt.
final MRClientProtocol amProxy=mock(MRClientProtocol.class);
when(amProxy.getJobReport(any(GetJobReportRequest.class))).thenThrow(new RuntimeException("11")).thenThrow(new RuntimeException("22")).thenThrow(new RuntimeException("33")).thenThrow(new RuntimeException("44")).thenReturn(getJobReportResponse());
Configuration conf=new YarnConfiguration();
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,!isAMReachableFromClient);
// Override instantiateAMProxy so every (re)connect hands back the same
// failing-then-succeeding mock; the super call keeps the real bookkeeping.
ClientServiceDelegate clientServiceDelegate=new ClientServiceDelegate(conf,rm,oldJobId,null){
@Override MRClientProtocol instantiateAMProxy( final InetSocketAddress serviceAddr) throws IOException {
super.instantiateAMProxy(serviceAddr);
return amProxy;
}
}
;
// getJobStatus must survive the four failures via its retry loop.
JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
// The retry budget must match the configured maximum.
Assert.assertEquals(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES),clientServiceDelegate.getMaxClientRetry());
// Exactly five calls: four failures plus the final success.
verify(amProxy,times(5)).getJobReport(any(GetJobReportRequest.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAMAccessDisabled() throws IOException {
  // Only meaningful when the AM is NOT reachable from the client.
  if (isAMReachableFromClient) {
    return;
  }
  MRClientProtocol historyServerProxy=mock(MRClientProtocol.class);
  when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer());
  ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class);
  try {
    // Three RUNNING reports followed by a finished one.
    when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getFinishedApplicationReport());
  }
  catch ( YarnException e) {
    throw new IOException(e);
  }
  ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate));
  // While the app is RUNNING, the delegate must synthesize a status without
  // ever contacting the AM (was three copy-pasted blocks; now a loop).
  for (int i=0; i < 3; i++) {
    JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
    Assert.assertNotNull(jobStatus);
    Assert.assertEquals("N/A",jobStatus.getJobName());
    verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class));
  }
  // Once the app finishes, the real report comes from the history server —
  // still with zero AM contact.
  JobStatus jobStatus1=clientServiceDelegate.getJobStatus(oldJobId);
  Assert.assertNotNull(jobStatus1);
  Assert.assertEquals("TestJobFilePath",jobStatus1.getJobFile());
  Assert.assertEquals("http://TestTrackingUrl",jobStatus1.getTrackingUrl());
  Assert.assertEquals(1.0f,jobStatus1.getMapProgress(),0.0f);
  Assert.assertEquals(1.0f,jobStatus1.getReduceProgress(),0.0f);
  verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class));
}
InternalCallVerifier NullVerifier
@Test public void testRMDownRestoreForJobStatusBeforeGetAMReport() throws IOException {
// Simulate the RM being down for two getApplicationReport calls (wrapped in
// UndeclaredThrowableException as the RPC layer would) and recovering on the
// third; getJobStatus must retry and eventually succeed.
Configuration conf=new YarnConfiguration();
conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,3);
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,!isAMReachableFromClient);
MRClientProtocol historyServerProxy=mock(MRClientProtocol.class);
when(historyServerProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(getJobReportResponse());
ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class);
try {
// NOTE(review): "refuced" is a typo in the mock exception messages; kept
// byte-identical since the text is never asserted on.
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenThrow(new java.lang.reflect.UndeclaredThrowableException(new IOException("Connection refuced1"))).thenThrow(new java.lang.reflect.UndeclaredThrowableException(new IOException("Connection refuced2"))).thenReturn(getFinishedApplicationReport());
ClientServiceDelegate clientServiceDelegate=new ClientServiceDelegate(conf,rmDelegate,oldJobId,historyServerProxy);
JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
// Two failed attempts plus the successful third call.
verify(rmDelegate,times(3)).getApplicationReport(any(ApplicationId.class));
Assert.assertNotNull(jobStatus);
}
catch ( YarnException e) {
throw new IOException(e);
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
@Test public void testUnknownAppInRM() throws Exception {
  // Even when the RM does not know the app, the history server's report
  // must still yield a non-null job status.
  MRClientProtocol historyProxy=mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponse());
  ClientServiceDelegate delegate=getClientServiceDelegate(historyProxy,getRMDelegate());
  JobStatus status=delegate.getJobStatus(oldJobId);
  Assert.assertNotNull(status);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testCountersFromHistoryServer() throws Exception {
  // RM no longer knows the app; counters must be served by the history server.
  MRClientProtocol historyProxy=mock(MRClientProtocol.class);
  when(historyProxy.getCounters(getCountersRequest())).thenReturn(getCountersResponseFromHistoryServer());
  ResourceMgrDelegate resourceManager=mock(ResourceMgrDelegate.class);
  when(resourceManager.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null);
  ClientServiceDelegate delegate=getClientServiceDelegate(historyProxy,resourceManager);
  Counters counters=TypeConverter.toYarn(delegate.getJobCounters(oldJobId));
  Assert.assertNotNull(counters);
  // The canned history response carries dummyCounters/dummyCounter=1001.
  Assert.assertEquals(1001,counters.getCounterGroup("dummyCounters").getCounter("dummyCounter").getValue());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
@Test public void testRetriesOnConnectionFailure() throws Exception {
  // History server fails twice, then answers; the delegate must retry.
  MRClientProtocol historyProxy=mock(MRClientProtocol.class);
  when(historyProxy.getJobReport(getJobReportRequest())).thenThrow(new RuntimeException("1")).thenThrow(new RuntimeException("2")).thenReturn(getJobReportResponse());
  ResourceMgrDelegate resourceManager=mock(ResourceMgrDelegate.class);
  when(resourceManager.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null);
  ClientServiceDelegate delegate=getClientServiceDelegate(historyProxy,resourceManager);
  JobStatus status=delegate.getJobStatus(oldJobId);
  Assert.assertNotNull(status);
  // Two failures plus the final success = three calls.
  verify(historyProxy,times(3)).getJobReport(any(GetJobReportRequest.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testReconnectOnAMRestart() throws IOException {
// Only meaningful when the client talks to the AM directly.
if (!isAMReachableFromClient) {
return;
}
MRClientProtocol historyServerProxy=mock(MRClientProtocol.class);
ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class);
try {
// RM report sequence: first-generation AM, two "no AM" gaps while the AM
// restarts, then the second-generation AM.
when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport("am2",90));
}
catch ( YarnException e) {
throw new IOException(e);
}
// First-generation AM answers once, then dies.
GetJobReportResponse jobReportResponse1=mock(GetJobReportResponse.class);
when(jobReportResponse1.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-firstGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,""));
MRClientProtocol firstGenAMProxy=mock(MRClientProtocol.class);
when(firstGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse1).thenThrow(new RuntimeException("AM is down!"));
// Second-generation AM answers consistently.
GetJobReportResponse jobReportResponse2=mock(GetJobReportResponse.class);
when(jobReportResponse2.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-secondGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,""));
MRClientProtocol secondGenAMProxy=mock(MRClientProtocol.class);
when(secondGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse2);
// Hand out the first-gen proxy on the first connect and the second-gen
// proxy on the reconnect.
ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate));
doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class));
// 1st call: served by the first-generation AM.
JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-firstGen",jobStatus.getJobName());
// 2nd call: first-gen AM throws, delegate reconnects to the second-gen AM.
jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-secondGen",jobStatus.getJobName());
// 3rd call: cached second-gen proxy is reused — no further reconnect.
jobStatus=clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("jobName-secondGen",jobStatus.getJobName());
verify(clientServiceDelegate,times(2)).instantiateAMProxy(any(InetSocketAddress.class));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testHistoryServerNotConfigured() throws Exception {
  // Without a history server, a job unknown to the RM reports "N/A"/PREP.
  ClientServiceDelegate delegate=getClientServiceDelegate(null,getRMDelegate());
  JobStatus status=delegate.getJobStatus(oldJobId);
  Assert.assertEquals("N/A",status.getUsername());
  Assert.assertEquals(JobStatus.State.PREP,status.getState());
  // With a finished application report from the RM, the status is built
  // from that report: its user and a SUCCEEDED state.
  ResourceMgrDelegate resourceManager=mock(ResourceMgrDelegate.class);
  ApplicationReport finishedReport=getFinishedApplicationReport();
  when(resourceManager.getApplicationReport(jobId.getAppId())).thenReturn(finishedReport);
  delegate=getClientServiceDelegate(null,resourceManager);
  status=delegate.getJobStatus(oldJobId);
  Assert.assertEquals(finishedReport.getUser(),status.getUsername());
  Assert.assertEquals(JobStatus.State.SUCCEEDED,status.getState());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=1000) public void testClock(){
  // Clock.getTime() should track the system wall clock closely.
  Clock clock=new Clock();
  long expectedMillis=System.currentTimeMillis();
  long actualMillis=clock.getTime();
  // Delta compare with 30 ms slack absorbs scheduling jitter between reads.
  assertEquals(expectedMillis,actualMillis,30);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings("deprecation") @Test(timeout=1000) public void testGraylistedTrackers(){
// Graylisting is a deprecated feature; the shared clusterStatus fixture
// must report zero graylisted trackers both by count and by name list.
Assert.assertEquals(0,clusterStatus.getGraylistedTrackers());
Assert.assertTrue(clusterStatus.getGraylistedTrackerNames().isEmpty());
}
InternalCallVerifier EqualityVerifier
/**
 * Test getSplits: a single small text file combined under
 * CombineFileInputFormat must yield exactly the requested number of splits.
 */
@Test @SuppressWarnings("unchecked") public void testSplits() throws IOException {
JobConf job=new JobConf(defaultConf);
// Start from a clean work dir and write one six-line input file.
localFs.delete(workDir,true);
writeFile(localFs,new Path(workDir,"test.txt"),"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
FileInputFormat.setInputPaths(job,workDir);
// Anonymous subclass supplies the required record-reader factory.
CombineFileInputFormat format=new CombineFileInputFormat(){
@Override public RecordReader getRecordReader( InputSplit split, JobConf job, Reporter reporter) throws IOException {
return new CombineFileRecordReader(job,(CombineFileSplit)split,reporter,CombineFileRecordReader.class);
}
}
;
final int SIZE_SPLITS=1;
LOG.info("Trying to getSplits with splits = " + SIZE_SPLITS);
InputSplit[] splits=format.getSplits(job,SIZE_SPLITS);
LOG.info("Got getSplits = " + splits.length);
// One file, one requested split -> exactly one combined split.
assertEquals("splits == " + SIZE_SPLITS,SIZE_SPLITS,splits.length);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testFormat() throws Exception {
// Round-trip test for CombineSequenceFileInputFormat: write numFiles
// sequence files of int keys, combine them into one split, and verify every
// key is read exactly once.
JobConf job=new JobConf(conf);
Reporter reporter=Reporter.NULL;
// Seeded randomness: the seed is logged so failures are reproducible.
Random random=new Random();
long seed=random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
InputFormat format=new CombineSequenceFileInputFormat();
IntWritable key=new IntWritable();
BytesWritable value=new BytesWritable();
// Three rounds with different random split requests.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.info("splitting: got = " + splits.length);
// The combine format is expected to coalesce everything into one split.
assertEquals("We got more than one splits!",1,splits.length);
InputSplit split=splits[0];
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// Track which keys have been seen; each must appear exactly once.
BitSet bits=new BitSet(length);
RecordReader reader=format.getRecordReader(split,job,reporter);
try {
while (reader.next(key,value)) {
assertFalse("Key in multiple partitions.",bits.get(key.get()));
bits.set(key.get());
}
}
finally {
reader.close();
}
// Every key 0..length-1 must have been read.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testFormat() throws Exception {
// Round-trip test for CombineTextInputFormat: write numFiles text files of
// integer lines, combine them into one split, and verify every value is
// read exactly once.
JobConf job=new JobConf(defaultConf);
// Seeded randomness: the seed is logged so failures are reproducible.
Random random=new Random();
long seed=random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
CombineTextInputFormat format=new CombineTextInputFormat();
LongWritable key=new LongWritable();
Text value=new Text();
// Three rounds with different random split requests.
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(length / 20) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.info("splitting: got = " + splits.length);
// The combine format is expected to coalesce everything into one split.
assertEquals("We got more than one splits!",1,splits.length);
InputSplit split=splits[0];
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// Track which values have been seen; each must appear exactly once.
BitSet bits=new BitSet(length);
LOG.debug("split= " + split);
RecordReader reader=format.getRecordReader(split,job,voidReporter);
try {
int count=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.info("splits=" + split + " count="+ count);
}
finally {
reader.close();
}
// Every value 0..length-1 must have been read.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Test using the gzip codec for reading: two gzipped files are combined
 * into a single split and all lines from both must come back, in
 * whichever file order the combiner chose.
 */
@Test(timeout=10000) public void testGzip() throws IOException {
JobConf job=new JobConf(defaultConf);
CompressionCodec gzip=new GzipCodec();
ReflectionUtils.setConf(gzip,job);
localFs.delete(workDir,true);
// Two compressed inputs: six lines and two lines.
writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(job,workDir);
CombineTextInputFormat format=new CombineTextInputFormat();
// Even with 100 requested splits, compressed files combine into one.
InputSplit[] splits=format.getSplits(job,100);
assertEquals("compressed splits == 1",1,splits.length);
List results=readSplit(format,splits[0],job);
// 6 + 2 lines in total.
assertEquals("splits[0] length",8,results.size());
final String[] firstList={"the quick","brown","fox jumped","over"," the lazy"," dog"};
final String[] secondList={"this is a test","of gzip"};
// File order inside the combined split is not guaranteed; dispatch on
// whichever file's first line appears first.
String first=results.get(0).toString();
if (first.equals(firstList[0])) {
testResults(results,firstList,secondList);
}
else if (first.equals(secondList[0])) {
testResults(results,secondList,firstList);
}
else {
fail("unexpected first token!");
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test using the raw Inflater codec for reading gzip files: manually parse
 * the gzip header per RFC 1952, then inflate the first member's deflate
 * stream with a raw (nowrap) java.util.zip.Inflater.
 */
@Test public void testPrototypeInflaterGzip() throws IOException {
CompressionCodec gzip=new GzipCodec();
localFs.delete(workDir,true);
System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " + "non-native/Java Inflater and manual gzip header/trailer parsing"+ COLOR_NORMAL);
// Fixture file "concat.gz" is expected on the local FS (test.concat.data).
final String fn="concat" + gzip.getDefaultExtension();
Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn);
Path fnHDFS=new Path(workDir,fn);
localFs.copyFromLocalFile(fnLocal,fnHDFS);
final FileInputStream in=new FileInputStream(fnLocal.toString());
assertEquals("concat bytes available",148,in.available());
// --- fixed 10-byte gzip header: magic 1f 8b, method 8 (deflate) ---
byte[] compressedBuf=new byte[256];
int numBytesRead=in.read(compressedBuf,0,10);
assertEquals("header bytes read",10,numBytesRead);
assertEquals("1st byte",0x1f,compressedBuf[0] & 0xff);
assertEquals("2nd byte",0x8b,compressedBuf[1] & 0xff);
assertEquals("3rd byte (compression method)",8,compressedBuf[2] & 0xff);
byte flags=(byte)(compressedBuf[3] & 0xff);
// FEXTRA: 2-byte little-endian XLEN followed by XLEN bytes to skip.
// NOTE(review): in.skip() return value is ignored; fine for a small local
// file but a short skip would desync the parse.
if ((flags & 0x04) != 0) {
numBytesRead=in.read(compressedBuf,0,2);
assertEquals("XLEN bytes read",2,numBytesRead);
int xlen=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
in.skip(xlen);
}
// FNAME: zero-terminated original filename.
if ((flags & 0x08) != 0) {
while ((numBytesRead=in.read()) != 0) {
assertFalse("unexpected end-of-file while reading filename",numBytesRead == -1);
}
}
// FCOMMENT: zero-terminated comment.
if ((flags & 0x10) != 0) {
while ((numBytesRead=in.read()) != 0) {
assertFalse("unexpected end-of-file while reading comment",numBytesRead == -1);
}
}
// Reserved flag bits must be zero; entering this branch always fails.
if ((flags & 0xe0) != 0) {
assertTrue("reserved bits are set??",(flags & 0xe0) == 0);
}
// FHCRC: 2-byte header CRC16 (read and discarded; value unused).
if ((flags & 0x02) != 0) {
numBytesRead=in.read(compressedBuf,0,2);
assertEquals("CRC16 bytes read",2,numBytesRead);
int crc16=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff;
}
// Remaining bytes: raw deflate data of the first member (plus trailer and
// any following members, which the single inflate() call ignores).
numBytesRead=in.read(compressedBuf);
byte[] uncompressedBuf=new byte[256];
// nowrap=true: raw deflate, no zlib header expected.
Inflater inflater=new Inflater(true);
inflater.setInput(compressedBuf,0,numBytesRead);
try {
int numBytesUncompressed=inflater.inflate(uncompressedBuf);
String outString=new String(uncompressedBuf,0,numBytesUncompressed,"UTF-8");
System.out.println("uncompressed data of first gzip member = [" + outString + "]");
}
catch ( java.util.zip.DataFormatException ex) {
throw new IOException(ex.getMessage());
}
in.close();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the bzip2 codec for reading: a concatenated fixture file and a
 * freshly written small file must each become their own split, with all
 * lines intact.
 */
@Test public void testBzip2() throws IOException {
JobConf jobConf=new JobConf(defaultConf);
CompressionCodec bzip2=new BZip2Codec();
ReflectionUtils.setConf(bzip2,jobConf);
localFs.delete(workDir,true);
System.out.println(COLOR_BR_CYAN + "testBzip2() using non-native CBZip2InputStream (presumably)" + COLOR_NORMAL);
// Fixture "concat.bz2" (multiple bzip2 members) from test.concat.data.
final String fn="concat" + bzip2.getDefaultExtension();
Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn);
Path fnHDFS=new Path(workDir,fn);
localFs.copyFromLocalFile(fnLocal,fnHDFS);
writeFile(localFs,new Path(workDir,"part2.txt.bz2"),bzip2,"this is a test\nof bzip2\n");
FileInputFormat.setInputPaths(jobConf,workDir);
TextInputFormat format=new TextInputFormat();
format.configure(jobConf);
// Small min split size so each file still maps to one split.
format.setMinSplitSize(256);
InputSplit[] splits=format.getSplits(jobConf,100);
assertEquals("compressed splits == 2",2,splits.length);
// Split order is not guaranteed; normalize so the concat file is first.
FileSplit tmp=(FileSplit)splits[0];
if (tmp.getPath().getName().equals("part2.txt.bz2")) {
splits[0]=splits[1];
splits[1]=tmp;
}
// Concatenated members must decode as one continuous six-line stream.
List results=readSplit(format,splits[0],jobConf);
assertEquals("splits[0] num lines",6,results.size());
assertEquals("splits[0][5]","member #3",results.get(5).toString());
results=readSplit(format,splits[1],jobConf);
assertEquals("splits[1] num lines",2,results.size());
assertEquals("splits[1][0]","this is a test",results.get(0).toString());
assertEquals("splits[1][1]","of bzip2",results.get(1).toString());
}
InternalCallVerifier EqualityVerifier
/**
 * Extended bzip2 test, similar to BuiltInGzipDecompressor test above:
 * stages the concat-then-compress and compress-then-concat fixtures, sanity
 * checks their sizes, then runs the buffer-size sweep.
 */
@Test public void testMoreBzip2() throws IOException {
  JobConf jobConf=new JobConf(defaultConf);
  CompressionCodec bzip2=new BZip2Codec();
  ReflectionUtils.setConf(bzip2,jobConf);
  localFs.delete(workDir,true);
  System.out.println(COLOR_BR_MAGENTA + "testMoreBzip2() using non-native CBZip2InputStream (presumably)" + COLOR_NORMAL);
  // Stage both fixture files from test.concat.data into the work dir.
  String fn1="testConcatThenCompress.txt" + bzip2.getDefaultExtension();
  Path fnLocal1=new Path(System.getProperty("test.concat.data","/tmp"),fn1);
  Path fnHDFS1=new Path(workDir,fn1);
  localFs.copyFromLocalFile(fnLocal1,fnHDFS1);
  String fn2="testCompressThenConcat.txt" + bzip2.getDefaultExtension();
  Path fnLocal2=new Path(System.getProperty("test.concat.data","/tmp"),fn2);
  Path fnHDFS2=new Path(workDir,fn2);
  localFs.copyFromLocalFile(fnLocal2,fnHDFS2);
  FileInputFormat.setInputPaths(jobConf,workDir);
  // try-with-resources fixes the stream leak in the original (the two
  // FileInputStreams were opened for the size check but never closed).
  try (FileInputStream in1=new FileInputStream(fnLocal1.toString());
       FileInputStream in2=new FileInputStream(fnLocal2.toString())) {
    assertEquals("concat bytes available",2567,in1.available());
    assertEquals("concat bytes available",3056,in2.available());
  }
  doMultipleBzip2BufferSizes(jobConf,false);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using Hadoop's original, native-zlib gzip codec for reading: a
 * concatenated fixture and a freshly written file each become one split
 * with all lines intact.
 */
@Test public void testGzip() throws IOException {
JobConf jobConf=new JobConf(defaultConf);
CompressionCodec gzip=new GzipCodec();
ReflectionUtils.setConf(gzip,jobConf);
localFs.delete(workDir,true);
// NOTE(review): the condition matches BuiltInGzipDecompressor (the
// pure-Java decompressor) yet the message says "native-zlib", and the else
// branch skips claiming native libs are missing — the sense of this check
// looks inverted relative to the messages; confirm against upstream.
if (org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class == gzip.getDecompressorType()) {
System.out.println(COLOR_BR_RED + "testGzip() using native-zlib Decompressor (" + gzip.getDecompressorType()+ ")"+ COLOR_NORMAL);
}
else {
LOG.warn("testGzip() skipped: native (C/C++) libs not loaded");
return;
}
// Fixture "concat.gz" (multiple gzip members) from test.concat.data.
final String fn="concat" + gzip.getDefaultExtension();
Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn);
Path fnHDFS=new Path(workDir,fn);
localFs.copyFromLocalFile(fnLocal,fnHDFS);
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(jobConf,workDir);
TextInputFormat format=new TextInputFormat();
format.configure(jobConf);
InputSplit[] splits=format.getSplits(jobConf,100);
assertEquals("compressed splits == 2",2,splits.length);
// Split order is not guaranteed; normalize so the concat file is first.
FileSplit tmp=(FileSplit)splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0]=splits[1];
splits[1]=tmp;
}
// Concatenated members must decode as one continuous six-line stream.
List results=readSplit(format,splits[0],jobConf);
assertEquals("splits[0] num lines",6,results.size());
assertEquals("splits[0][5]","member #3",results.get(5).toString());
results=readSplit(format,splits[1],jobConf);
assertEquals("splits[1] num lines",2,results.size());
assertEquals("splits[1][0]","this is a test",results.get(0).toString());
assertEquals("splits[1][1]","of gzip",results.get(1).toString());
}
InternalCallVerifier EqualityVerifier
/**
 * Test using the new BuiltInGzipDecompressor codec for reading gzip files.
 * Forces the pure-Java decompressor, verifies the concatenated fixtures'
 * sizes and line/byte counts, then re-reads them at multiple buffer sizes.
 */
@Test public void testBuiltInGzipDecompressor() throws IOException {
  JobConf jobConf=new JobConf(defaultConf);
  // Force the pure-Java decompressor even when native zlib is present.
  jobConf.setBoolean("io.native.lib.available",false);
  CompressionCodec gzip=new GzipCodec();
  ReflectionUtils.setConf(gzip,jobConf);
  localFs.delete(workDir,true);
  assertEquals("[non-native (Java) codec]",org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class,gzip.getDecompressorType());
  System.out.println(COLOR_BR_YELLOW + "testBuiltInGzipDecompressor() using" + " non-native (Java Inflater) Decompressor ("+ gzip.getDecompressorType()+ ")"+ COLOR_NORMAL);
  String fn1="testConcatThenCompress.txt" + gzip.getDefaultExtension();
  Path fnLocal1=new Path(System.getProperty("test.concat.data","/tmp"),fn1);
  Path fnHDFS1=new Path(workDir,fn1);
  localFs.copyFromLocalFile(fnLocal1,fnHDFS1);
  String fn2="testCompressThenConcat.txt" + gzip.getDefaultExtension();
  Path fnLocal2=new Path(System.getProperty("test.concat.data","/tmp"),fn2);
  Path fnHDFS2=new Path(workDir,fn2);
  localFs.copyFromLocalFile(fnLocal2,fnHDFS2);
  FileInputFormat.setInputPaths(jobConf,workDir);
  // in1 is only needed for the size check; close it right away
  // (the original leaked this descriptor).
  final FileInputStream in1=new FileInputStream(fnLocal1.toString());
  try {
    assertEquals("concat bytes available",2734,in1.available());
  } finally {
    in1.close();
  }
  // in2 is consumed below; closing the LineReader closes the codec stream
  // and, through it, in2.
  final FileInputStream in2=new FileInputStream(fnLocal2.toString());
  assertEquals("concat bytes available",3413,in2.available());
  CompressionInputStream cin2=gzip.createInputStream(in2);
  LineReader in=new LineReader(cin2);
  Text out=new Text();
  int numBytes, totalBytes=0, lineNum=0;
  while ((numBytes=in.readLine(out)) > 0) {
    ++lineNum;
    totalBytes+=numBytes;
  }
  in.close();
  assertEquals("total uncompressed bytes in concatenated test file",5346,totalBytes);
  assertEquals("total uncompressed lines in concatenated test file",84,lineNum);
  doMultipleGzipBufferSizes(jobConf,false);
  doMultipleGzipBufferSizes(jobConf,true);
}
InternalCallVerifier EqualityVerifier
/**
 * Counters created through the new FileSystemCounter enum must remain
 * visible via the legacy getGroupNames() view, including the deprecated
 * "FileSystemCounters" alias group alongside the enum's class name.
 */
@Test public void testLegacyGetGroupNames(){
  Counters legacyCounters=new Counters();
  legacyCounters.findCounter("fs1",FileSystemCounter.BYTES_READ).increment(1);
  legacyCounters.findCounter("fs2",FileSystemCounter.BYTES_READ).increment(1);
  legacyCounters.incrCounter("group1","counter1",1);
  // Build the expected group-name set, then compare against the actual view.
  HashSet expectedGroups=new HashSet();
  expectedGroups.add("group1");
  expectedGroups.add("FileSystemCounters");
  expectedGroups.add("org.apache.hadoop.mapreduce.FileSystemCounter");
  HashSet actualGroups=new HashSet(legacyCounters.getGroupNames());
  assertEquals(expectedGroups,actualGroups);
}
InternalCallVerifier BooleanVerifier
// Verifies that a live iterator over the FileSystemCounter group keeps
// working while new schemes are added to the group (presumably guarding
// against ConcurrentModificationException-style failures — the asserts
// only require that iteration still proceeds).
@Test public void testFileSystemGroupIteratorConcurrency(){
Counters counters=new Counters();
counters.findCounter("fs1",FileSystemCounter.BYTES_READ).increment(1);
counters.findCounter("fs2",FileSystemCounter.BYTES_READ).increment(1);
Group group=counters.getGroup(FileSystemCounter.class.getName());
Iterator iterator=group.iterator();
// Mutate the group after the iterator has been obtained...
counters.findCounter("fs3",FileSystemCounter.BYTES_READ).increment(1);
assertTrue(iterator.hasNext());
iterator.next();
// ...and again mid-iteration; the iterator must still advance.
counters.findCounter("fs3",FileSystemCounter.BYTES_READ).increment(1);
assertTrue(iterator.hasNext());
iterator.next();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * makeCompactString() must render each counter as "group.counter:value".
 * With two groups the relative order is unspecified, so both orderings
 * are accepted.
 */
@Test public void testMakeCompactString(){
  final String GC1="group1.counter1:1";
  final String GC2="group2.counter2:3";
  Counters counters=new Counters();
  counters.incrCounter("group1","counter1",1);
  // Single counter: exact rendering. Use the GC1 constant instead of
  // duplicating the literal as the original did.
  assertEquals(GC1,counters.makeCompactString());
  counters.incrCounter("group2","counter2",3);
  String cs=counters.makeCompactString();
  assertTrue("Bad compact string",cs.equals(GC1 + ',' + GC2) || cs.equals(GC2 + ',' + GC1));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
* Verify counter value works
*/
@SuppressWarnings("deprecation") @Test public void testCounterValue(){
Counters counters=new Counters();
final int NUMBER_TESTS=100;
final int NUMBER_INC=10;
final Random rand=new Random();
for (int i=0; i < NUMBER_TESTS; i++) {
long initValue=rand.nextInt();
long expectedValue=initValue;
Counter counter=counters.findCounter("foo","bar");
counter.setValue(initValue);
assertEquals("Counter value is not initialized correctly",expectedValue,counter.getValue());
for (int j=0; j < NUMBER_INC; j++) {
int incValue=rand.nextInt();
counter.increment(incValue);
expectedValue+=incValue;
assertEquals("Counter value is not incremented correctly",expectedValue,counter.getValue());
}
expectedValue=rand.nextInt();
counter.setValue(expectedValue);
assertEquals("Counter value is not set correctly",expectedValue,counter.getValue());
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
// End-to-end check of FadvisedFileRegion.customShuffleTransfer(): writes a
// FILE_SIZE input file of random bytes, transfers the slice
// [position, position+count) through a file channel, and verifies the
// copied bytes match the source byte-for-byte.
@Test(timeout=100000) public void testCustomShuffleTransfer() throws IOException {
File absLogDir=new File("target",TestFadvisedFileRegion.class.getSimpleName() + "LocDir").getAbsoluteFile();
String testDirPath=StringUtils.join(Path.SEPARATOR,new String[]{absLogDir.getAbsolutePath(),"testCustomShuffleTransfer"});
File testDir=new File(testDirPath);
testDir.mkdirs();
System.out.println(testDir.getAbsolutePath());
File inFile=new File(testDir,"fileIn.out");
File outFile=new File(testDir,"fileOut.out");
// Random content so the later byte-by-byte comparison is meaningful.
byte[] initBuff=new byte[FILE_SIZE];
Random rand=new Random();
rand.nextBytes(initBuff);
FileOutputStream out=new FileOutputStream(inFile);
try {
out.write(initBuff);
}
finally {
IOUtils.cleanup(LOG,out);
}
// Slice deliberately ends one byte short of a power-of-two boundary.
int position=2 * 1024 * 1024;
int count=4 * 1024 * 1024 - 1;
RandomAccessFile inputFile=null;
RandomAccessFile targetFile=null;
WritableByteChannel target=null;
FadvisedFileRegion fileRegion=null;
try {
inputFile=new RandomAccessFile(inFile.getAbsolutePath(),"r");
targetFile=new RandomAccessFile(outFile.getAbsolutePath(),"rw");
target=targetFile.getChannel();
Assert.assertEquals(FILE_SIZE,inputFile.length());
fileRegion=new FadvisedFileRegion(inputFile,position,count,false,0,null,null,1024,false);
customShuffleTransferCornerCases(fileRegion,target,count);
// Drain the region in as many calls as the implementation needs;
// the total transferred must equal the requested count.
long pos=0;
long size;
while ((size=fileRegion.customShuffleTransfer(target,pos)) > 0) {
pos+=size;
}
Assert.assertEquals(count,(int)pos);
Assert.assertEquals(count,targetFile.length());
}
finally {
// Release region resources before closing channels/files.
if (fileRegion != null) {
fileRegion.releaseExternalResources();
}
IOUtils.cleanup(LOG,target);
IOUtils.cleanup(LOG,targetFile);
IOUtils.cleanup(LOG,inputFile);
}
// Verify the output file matches the source slice exactly.
byte[] buff=new byte[FILE_SIZE];
FileInputStream in=new FileInputStream(outFile);
try {
int total=in.read(buff,0,count);
Assert.assertEquals(count,total);
for (int i=0; i < count; i++) {
Assert.assertEquals(initBuff[position + i],buff[i]);
}
}
finally {
IOUtils.cleanup(LOG,in);
}
// Best-effort cleanup of the temp tree.
inFile.delete();
outFile.delete();
testDir.delete();
absLogDir.delete();
}
InternalCallVerifier EqualityVerifier
// Verifies that computing splits drives exactly one listLocatedStatus()
// call on the underlying (mock) file system.
@Test public void testListLocatedStatus() throws Exception {
Configuration conf=getConfiguration();
// Keep FS caching enabled so the MockFileSystem fetched below is the
// same instance the input format will use.
conf.setBoolean("fs.test.impl.disable.cache",false);
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2");
MockFileSystem mockFs=(MockFileSystem)new Path("test:///").getFileSystem(conf);
Assert.assertEquals("listLocatedStatus already called",0,mockFs.numListLocatedStatusCalls);
JobConf job=new JobConf(conf);
TextInputFormat fileInputFormat=new TextInputFormat();
fileInputFormat.configure(job);
InputSplit[] splits=fileInputFormat.getSplits(job,1);
Assert.assertEquals("Input splits are not correct",2,splits.length);
Assert.assertEquals("listLocatedStatuss calls",1,mockFs.numListLocatedStatusCalls);
// Drop the cached mock FS so later tests start with a fresh call count.
FileSystem.closeAll();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Checks that getLocationInfo() exposes per-replica in-memory/on-disk
// details consistent with getLocations().
// NOTE(review): assumes the mock file system reports exactly the hosts
// "localhost" (cached in memory) and "otherhost" (disk only) — confirm
// against getConfiguration()'s MockFileSystem setup. If locations[0] were
// neither host, both lookups below would select locationInfo[1].
@Test public void testSplitLocationInfo() throws Exception {
Configuration conf=getConfiguration();
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2");
JobConf job=new JobConf(conf);
TextInputFormat fileInputFormat=new TextInputFormat();
fileInputFormat.configure(job);
FileSplit[] splits=(FileSplit[])fileInputFormat.getSplits(job,1);
String[] locations=splits[0].getLocations();
Assert.assertEquals(2,locations.length);
SplitLocationInfo[] locationInfo=splits[0].getLocationInfo();
Assert.assertEquals(2,locationInfo.length);
// Pair each host name with its location info regardless of report order.
SplitLocationInfo localhostInfo=locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1];
SplitLocationInfo otherhostInfo=locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1];
Assert.assertTrue(localhostInfo.isOnDisk());
Assert.assertTrue(localhostInfo.isInMemory());
Assert.assertTrue(otherhostInfo.isOnDisk());
Assert.assertFalse(otherhostInfo.isInMemory());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A missing input directory must surface as an InvalidInputException whose
// message names the absent path, even with multi-threaded listing enabled.
@Test public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf=new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
// Shared helper sets up one existing and one missing input dir ("input2").
org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestErrorOnNonExistantDir(conf,localFs);
JobConf jobConf=new JobConf(conf);
TextInputFormat fif=new TextInputFormat();
fif.configure(jobConf);
try {
fif.listStatus(jobConf);
Assert.fail("Expecting an IOException for a missing Input path");
}
catch ( IOException e) {
Path expectedExceptionPath=new Path(TEST_ROOT_DIR,"input2");
expectedExceptionPath=localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(),e.getMessage());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec with two input files.
 * Each file holds ten fixed-length (5-byte) records; verifies that each
 * compressed file becomes one split and that records are read intact.
 */
@Test(timeout=5000) public void testGzipWithTwoInputs() throws IOException {
CompressionCodec gzip=new GzipCodec();
localFs.delete(workDir,true);
FixedLengthInputFormat format=new FixedLengthInputFormat();
JobConf job=new JobConf(defaultConf);
format.setRecordLength(job,5);
FileInputFormat.setInputPaths(job,workDir);
ReflectionUtils.setConf(gzip,job);
format.configure(job);
writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"one two threefour five six seveneightnine ten ");
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"ten nine eightsevensix five four threetwo one ");
InputSplit[] splits=format.getSplits(job,100);
assertEquals("compressed splits == 2",2,splits.length);
// Normalize split order so splits[0] is always part1.
FileSplit tmp=(FileSplit)splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0]=splits[1];
splits[1]=tmp;
}
List results=readSplit(format,splits[0],job);
assertEquals("splits[0] length",10,results.size());
assertEquals("splits[0][5]","six ",results.get(5));
results=readSplit(format,splits[1],job);
assertEquals("splits[1] length",10,results.size());
assertEquals("splits[1][0]","ten ",results.get(0));
assertEquals("splits[1][1]","nine ",results.get(1));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With ACLs enabled and an empty view ACL, a random user must be denied
 * both view and modify access while the job owner retains both.
 */
@Test public void testClusterNoAdmins(){
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),"");
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  String noAdminUser="testuser2";
  JobACLsManager aclsManager=new JobACLsManager(conf);
  final Map jobACLs=aclsManager.constructJobACLs(conf);
  // A user who is neither owner nor admin gets nothing.
  UserGroupInformation callerUGI=UserGroupInformation.createUserForTesting(noAdminUser,new String[]{});
  assertFalse("random user should not have view access",
      aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB)));
  assertFalse("random user should not have modify access",
      aclsManager.checkAccess(callerUGI,JobACL.MODIFY_JOB,jobOwner,jobACLs.get(JobACL.MODIFY_JOB)));
  // The owner always has full access to its own job.
  callerUGI=UserGroupInformation.createUserForTesting(jobOwner,new String[]{});
  assertTrue("owner should have view access",
      aclsManager.checkAccess(callerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB)));
  assertTrue("owner should have modify access",
      aclsManager.checkAccess(callerUGI,JobACL.MODIFY_JOB,jobOwner,jobACLs.get(JobACL.MODIFY_JOB)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A user listed under MRConfig.MR_ADMINS must be able to both view and
 * modify a job it does not own, even with owner-only per-job ACLs.
 */
@Test public void testClusterAdmins(){
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),jobOwner);
  conf.set(JobACL.MODIFY_JOB.getAclName(),jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  String clusterAdmin="testuser2";
  conf.set(MRConfig.MR_ADMINS,clusterAdmin);
  JobACLsManager aclsManager=new JobACLsManager(conf);
  final Map jobACLs=aclsManager.constructJobACLs(conf);
  UserGroupInformation adminUGI=UserGroupInformation.createUserForTesting(clusterAdmin,new String[]{});
  assertTrue("cluster admin should have view access",
      aclsManager.checkAccess(adminUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB)));
  assertTrue("cluster admin should have modify access",
      aclsManager.checkAccess(adminUGI,JobACL.MODIFY_JOB,jobOwner,jobACLs.get(JobACL.MODIFY_JOB)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * With ACL enforcement disabled, any user gets access regardless of the
 * configured view ACL.
 */
@Test public void testAclsOff(){
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,false);
  String noAdminUser="testuser2";
  JobACLsManager aclsManager=new JobACLsManager(conf);
  final Map jobACLs=aclsManager.constructJobACLs(conf);
  UserGroupInformation strangerUGI=UserGroupInformation.createUserForTesting(noAdminUser,new String[]{});
  assertTrue("acls off so anyone should have access",
      aclsManager.checkAccess(strangerUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Admin rights can be granted via a group named in MRConfig.MR_ADMINS
 * (the leading space selects the group field of the ACL string): a member
 * of that group gets view access despite an owner-only view ACL.
 */
@Test public void testGroups(){
  Configuration conf=new Configuration();
  String jobOwner="testuser";
  conf.set(JobACL.VIEW_JOB.getAclName(),jobOwner);
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
  String user="testuser2";
  String adminGroup="adminGroup";
  conf.set(MRConfig.MR_ADMINS," " + adminGroup);
  JobACLsManager aclsManager=new JobACLsManager(conf);
  final Map jobACLs=aclsManager.constructJobACLs(conf);
  UserGroupInformation memberUGI=UserGroupInformation.createUserForTesting(user,new String[]{adminGroup});
  assertTrue("user in admin group should have access",
      aclsManager.checkAccess(memberUGI,JobACL.VIEW_JOB,jobOwner,jobACLs.get(JobACL.VIEW_JOB)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * JobClient.isJobDirValid() requires both job.xml and job.split to be
 * present in the candidate directory.
 */
@Test(timeout=10000) public void testIsJobDirValid() throws IOException {
  Configuration conf=new Configuration();
  FileSystem fs=FileSystem.getLocal(conf);
  Path testDir=new Path(TEST_DIR);
  fs.mkdirs(testDir);
  // Empty directory: not a valid job dir.
  Assert.assertFalse(JobClient.isJobDirValid(testDir,fs));
  Path jobconf=new Path(testDir,"job.xml");
  Path jobsplit=new Path(testDir,"job.split");
  // Close the output streams immediately; the original leaked both
  // FSDataOutputStreams returned by create().
  fs.create(jobconf).close();
  fs.create(jobsplit).close();
  Assert.assertTrue(JobClient.isJobDirValid(testDir,fs));
  fs.delete(jobconf,true);
  fs.delete(jobsplit,true);
}
InternalCallVerifier EqualityVerifier
/**
 * Under the local job runner there are no task trackers, so the detailed
 * cluster status must report zero active and zero blacklisted trackers.
 */
@Test public void testGetClusterStatusWithLocalJobRunner() throws Exception {
  Configuration conf=new Configuration();
  conf.set(JTConfig.JT_IPC_ADDRESS,MRConfig.LOCAL_FRAMEWORK_NAME);
  conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.LOCAL_FRAMEWORK_NAME);
  JobClient client=new JobClient(conf);
  // true => ask for the detailed status including tracker lists.
  ClusterStatus status=client.getClusterStatus(true);
  Assert.assertEquals(0,status.getActiveTrackerNames().size());
  Assert.assertEquals(0,status.getBlacklistedTrackers());
  Assert.assertEquals(0,status.getBlackListedTrackersInfo().size());
}
InternalCallVerifier NullVerifier
// Submits a trivial job via the local runner and checks that
// JobClient.getJob() can look the submitted job back up by its ID.
@SuppressWarnings("deprecation") @Test public void testGetRunningJobFromJobClient() throws Exception {
JobConf conf=new JobConf();
conf.set("mapreduce.framework.name","local");
FileInputFormat.addInputPath(conf,createTempFile("in","hello"));
Path outputDir=new Path(TEST_ROOT_DIR,getClass().getSimpleName());
// Clear any output left over from a previous run.
outputDir.getFileSystem(conf).delete(outputDir,true);
FileOutputFormat.setOutputPath(conf,outputDir);
JobClient jc=new JobClient(conf);
RunningJob runningJob=jc.submitJob(conf);
assertNotNull("Running job",runningJob);
// Round-trip: the same client must resolve the job by ID.
RunningJob newRunningJob=jc.getJob(runningJob.getID());
assertNotNull("New running job",newRunningJob);
}
InternalCallVerifier EqualityVerifier
/**
 * Ensure that M/R 1.x applications can get and set task virtual memory with
 * old property names
 */
@SuppressWarnings("deprecation") @Test(timeout=1000) public void testDeprecatedPropertyNameForTaskVmem(){
JobConf configuration=new JobConf();
// Old (mapred.*) names must be readable through the typed getters.
configuration.setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,1024);
configuration.setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,1024);
Assert.assertEquals(1024,configuration.getMemoryForMapTask());
Assert.assertEquals(1024,configuration.getMemoryForReduceTask());
// Setting the new (mapreduce.*) names takes precedence over the old ones.
configuration.setLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY,1025);
configuration.setLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY,1025);
Assert.assertEquals(1025,configuration.getMemoryForMapTask());
Assert.assertEquals(1025,configuration.getMemoryForReduceTask());
// The typed setters must make the value visible under BOTH property names.
configuration.setMemoryForMapTask(2048);
configuration.setMemoryForReduceTask(2048);
Assert.assertEquals(2048,configuration.getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,-1));
Assert.assertEquals(2048,configuration.getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,-1));
Assert.assertEquals(2048,configuration.getLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY,-1));
Assert.assertEquals(2048,configuration.getLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY,-1));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test getters and setters of JobConf: for each property, checks the
 * documented default, sets a new value, and reads it back.
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testJobConf(){
JobConf conf=new JobConf();
// Jar unpack pattern default.
Pattern pattern=conf.getJarUnpackPattern();
assertEquals(Pattern.compile("(?:classes/|lib/).*").toString(),pattern.toString());
// Keep-failed-task-files flag and pattern.
assertFalse(conf.getKeepFailedTaskFiles());
conf.setKeepFailedTaskFiles(true);
assertTrue(conf.getKeepFailedTaskFiles());
assertNull(conf.getKeepTaskFilesPattern());
conf.setKeepTaskFilesPattern("123454");
assertEquals("123454",conf.getKeepTaskFilesPattern());
// Working directory round-trip.
assertNotNull(conf.getWorkingDirectory());
conf.setWorkingDirectory(new Path("test"));
assertTrue(conf.getWorkingDirectory().toString().endsWith("test"));
assertEquals(1,conf.getNumTasksToExecutePerJvm());
// Key-field comparator options.
assertNull(conf.getKeyFieldComparatorOption());
conf.setKeyFieldComparatorOptions("keySpec");
assertEquals("keySpec",conf.getKeyFieldComparatorOption());
// New-API reducer flag.
assertFalse(conf.getUseNewReducer());
conf.setUseNewReducer(true);
assertTrue(conf.getUseNewReducer());
// Speculative execution: overall flag is the OR of map and reduce flags.
assertTrue(conf.getMapSpeculativeExecution());
assertTrue(conf.getReduceSpeculativeExecution());
assertTrue(conf.getSpeculativeExecution());
conf.setReduceSpeculativeExecution(false);
assertTrue(conf.getSpeculativeExecution());
conf.setMapSpeculativeExecution(false);
assertFalse(conf.getSpeculativeExecution());
assertFalse(conf.getMapSpeculativeExecution());
assertFalse(conf.getReduceSpeculativeExecution());
conf.setSessionId("ses");
assertEquals("ses",conf.getSessionId());
// Per-tracker / percentage failure limits.
assertEquals(3,conf.getMaxTaskFailuresPerTracker());
conf.setMaxTaskFailuresPerTracker(2);
assertEquals(2,conf.getMaxTaskFailuresPerTracker());
assertEquals(0,conf.getMaxMapTaskFailuresPercent());
conf.setMaxMapTaskFailuresPercent(50);
assertEquals(50,conf.getMaxMapTaskFailuresPercent());
assertEquals(0,conf.getMaxReduceTaskFailuresPercent());
conf.setMaxReduceTaskFailuresPercent(70);
assertEquals(70,conf.getMaxReduceTaskFailuresPercent());
// Job priority.
assertEquals(JobPriority.NORMAL.name(),conf.getJobPriority().name());
conf.setJobPriority(JobPriority.HIGH);
assertEquals(JobPriority.HIGH.name(),conf.getJobPriority().name());
// Submit host name/address.
assertNull(conf.getJobSubmitHostName());
conf.setJobSubmitHostName("hostname");
assertEquals("hostname",conf.getJobSubmitHostName());
assertNull(conf.getJobSubmitHostAddress());
conf.setJobSubmitHostAddress("ww");
assertEquals("ww",conf.getJobSubmitHostAddress());
// Profiling: enabling and per-task-type ranges (setting the map range
// must not disturb the reduce range).
assertFalse(conf.getProfileEnabled());
conf.setProfileEnabled(true);
assertTrue(conf.getProfileEnabled());
assertEquals(conf.getProfileTaskRange(true).toString(),"0-2");
assertEquals(conf.getProfileTaskRange(false).toString(),"0-2");
conf.setProfileTaskRange(true,"0-3");
assertEquals(conf.getProfileTaskRange(false).toString(),"0-2");
assertEquals(conf.getProfileTaskRange(true).toString(),"0-3");
// Debug scripts.
assertNull(conf.getMapDebugScript());
conf.setMapDebugScript("mDbgScript");
assertEquals("mDbgScript",conf.getMapDebugScript());
assertNull(conf.getReduceDebugScript());
conf.setReduceDebugScript("rDbgScript");
assertEquals("rDbgScript",conf.getReduceDebugScript());
assertNull(conf.getJobLocalDir());
// Queue name.
assertEquals("default",conf.getQueueName());
conf.setQueueName("qname");
assertEquals("qname",conf.getQueueName());
// Task memory settings and deprecation helpers.
conf.setMemoryForMapTask(100 * 1000);
assertEquals(100 * 1000,conf.getMemoryForMapTask());
conf.setMemoryForReduceTask(1000 * 1000);
assertEquals(1000 * 1000,conf.getMemoryForReduceTask());
assertEquals(-1,conf.getMaxPhysicalMemoryForTask());
assertEquals("The variable key is no longer used.",JobConf.deprecatedString("key"));
// JVM opts must not be pre-populated.
assertEquals("mapreduce.map.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS));
assertEquals("mapreduce.reduce.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS));
}
InternalCallVerifier BooleanVerifier
/**
 * New-API counters, variant C: all five input files present and
 * io.sort.factor=3; checks aggregate task and file-system counters.
 */
@Test public void testNewCounterC() throws Exception {
  final Job job=createJob();
  final Configuration conf=job.getConfiguration();
  conf.setInt(JobContext.IO_SORT_FACTOR,3);
  // Ensure files 3 and 4 exist for this variant.
  createWordsFile(inFiles[3],conf);
  createWordsFile(inFiles[4],conf);
  // Total input size is the sum of all five files (loop replaces the
  // original's five copy-pasted additions).
  long inputSize=0;
  for (int i=0; i < 5; i++) {
    inputSize+=getFileSize(inFiles[i]);
  }
  org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
  org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN2"));
  assertTrue(job.waitForCompletion(true));
  final Counters c1=Counters.downgrade(job.getCounters());
  validateCounters(c1,122880,25600,102400);
  validateFileCounters(c1,inputSize,0,0,0);
}
InternalCallVerifier BooleanVerifier
/**
 * New-API counters, variant D: map-only job (zero reduces) over three
 * input files; file-counter output slots are passed as -1 (not checked).
 */
@Test public void testNewCounterD() throws Exception {
  final Job job=createJob();
  final Configuration conf=job.getConfiguration();
  conf.setInt(JobContext.IO_SORT_FACTOR,2);
  job.setNumReduceTasks(0);
  // Only files 0..2 participate in this variant.
  removeWordsFile(inFiles[3],conf);
  removeWordsFile(inFiles[4],conf);
  // Sum the three remaining input files (loop replaces copy-pasted adds).
  long inputSize=0;
  for (int i=0; i < 3; i++) {
    inputSize+=getFileSize(inFiles[i]);
  }
  org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
  org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN3"));
  assertTrue(job.waitForCompletion(true));
  final Counters c1=Counters.downgrade(job.getCounters());
  validateCounters(c1,0,15360,61440);
  validateFileCounters(c1,inputSize,0,-1,-1);
}
InternalCallVerifier BooleanVerifier
/**
 * New-API counters, variant A: three input files, io.sort.factor=2;
 * checks aggregate task and file-system counters.
 */
@Test public void testNewCounterA() throws Exception {
  final Job job=createJob();
  final Configuration conf=job.getConfiguration();
  conf.setInt(JobContext.IO_SORT_FACTOR,2);
  // Only files 0..2 participate in this variant.
  removeWordsFile(inFiles[3],conf);
  removeWordsFile(inFiles[4],conf);
  // Sum the three remaining input files (loop replaces copy-pasted adds).
  long inputSize=0;
  for (int i=0; i < 3; i++) {
    inputSize+=getFileSize(inFiles[i]);
  }
  org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
  org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN0"));
  assertTrue(job.waitForCompletion(true));
  final Counters c1=Counters.downgrade(job.getCounters());
  validateCounters(c1,73728,15360,61440);
  validateFileCounters(c1,inputSize,0,0,0);
}
InternalCallVerifier BooleanVerifier
/**
 * New-API counters, variant B: four input files (file 4 removed),
 * io.sort.factor=2; checks aggregate task and file-system counters.
 */
@Test public void testNewCounterB() throws Exception {
  final Job job=createJob();
  final Configuration conf=job.getConfiguration();
  conf.setInt(JobContext.IO_SORT_FACTOR,2);
  // Files 0..3 participate; file 4 is removed for this variant.
  createWordsFile(inFiles[3],conf);
  removeWordsFile(inFiles[4],conf);
  // Sum the four input files (loop replaces copy-pasted adds).
  long inputSize=0;
  for (int i=0; i < 4; i++) {
    inputSize+=getFileSize(inFiles[i]);
  }
  org.apache.hadoop.mapreduce.lib.input.FileInputFormat.setInputPaths(job,IN_DIR);
  org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job,new Path(OUT_DIR,"outputN1"));
  assertTrue(job.waitForCompletion(true));
  final Counters c1=Counters.downgrade(job.getCounters());
  validateCounters(c1,98304,20480,81920);
  validateFileCounters(c1,inputSize,0,0,0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests {@link TaskCounter}'s {@link TaskCounter.COMMITTED_HEAP_BYTES}.
 * The test consists of running a low-memory job which consumes less heap
 * memory and then running a high-memory job which consumes more heap memory,
 * and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller
 * than that of the high-memory job.
 * @throws IOException
 */
@Test @SuppressWarnings("deprecation") public void testHeapUsageCounter() throws Exception {
JobConf conf=new JobConf();
FileSystem fileSystem=FileSystem.getLocal(conf);
Path rootDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testRootDir=new Path(rootDir,"testHeapUsageCounter");
// Start from a clean scratch dir; also register it for JVM-exit cleanup.
fileSystem.delete(testRootDir,true);
fileSystem.setWorkingDirectory(testRootDir);
fileSystem.deleteOnExit(testRootDir);
MiniMRCluster mrCluster=new MiniMRCluster(1,fileSystem.getUri().toString(),1);
try {
conf=mrCluster.createJobConf();
JobClient jobClient=new JobClient(conf);
Path inDir=new Path(testRootDir,"in");
createWordsFile(inDir,conf);
// Job 1: baseline job with no extra target heap usage (0, 0).
RunningJob lowMemJob=runHeapUsageTestJob(conf,testRootDir,"-Xms32m -Xmx1G",0,0,fileSystem,jobClient,inDir);
JobID lowMemJobID=lowMemJob.getID();
long lowMemJobMapHeapUsage=getTaskCounterUsage(jobClient,lowMemJobID,1,0,TaskType.MAP);
System.out.println("Job1 (low memory job) map task heap usage: " + lowMemJobMapHeapUsage);
long lowMemJobReduceHeapUsage=getTaskCounterUsage(jobClient,lowMemJobID,1,0,TaskType.REDUCE);
System.out.println("Job1 (low memory job) reduce task heap usage: " + lowMemJobReduceHeapUsage);
// Job 2: targets 256MB more heap than job 1 actually used, per task type.
RunningJob highMemJob=runHeapUsageTestJob(conf,testRootDir,"-Xms32m -Xmx1G",lowMemJobMapHeapUsage + 256 * 1024 * 1024,lowMemJobReduceHeapUsage + 256 * 1024 * 1024,fileSystem,jobClient,inDir);
JobID highMemJobID=highMemJob.getID();
long highMemJobMapHeapUsage=getTaskCounterUsage(jobClient,highMemJobID,1,0,TaskType.MAP);
System.out.println("Job2 (high memory job) map task heap usage: " + highMemJobMapHeapUsage);
long highMemJobReduceHeapUsage=getTaskCounterUsage(jobClient,highMemJobID,1,0,TaskType.REDUCE);
System.out.println("Job2 (high memory job) reduce task heap usage: " + highMemJobReduceHeapUsage);
// The reported committed-heap counters must order the two jobs correctly.
assertTrue("Incorrect map heap usage reported by the map task",lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
assertTrue("Incorrect reduce heap usage reported by the reduce task",lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
}
finally {
mrCluster.shutdown();
try {
fileSystem.delete(testRootDir,true);
}
catch ( IOException ioe) {
// Best-effort cleanup only; deleteOnExit() above is the backstop.
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Round-trips a JobInfo through its Writable serialization and verifies
 * that job ID, submit dir, and user survive intact.
 */
@Test(timeout=5000) public void testJobInfo() throws IOException {
  JobInfo original=new JobInfo(new JobID("001",1),new Text("User"),new Path("/tmp/test"));
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  original.write(new DataOutputStream(buffer));
  JobInfo deserialized=new JobInfo();
  deserialized.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(original.getJobID().toString(),deserialized.getJobID().toString());
  assertEquals(original.getJobSubmitDir().getName(),deserialized.getJobSubmitDir().getName());
  assertEquals(original.getUser().toString(),deserialized.getUser().toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A new-API TaskID downgraded to the old API must render the canonical task
// string through both the deprecated getTaskId() and getTaskID().
@Test(timeout=5000) public void testTaskID() throws IOException, InterruptedException {
JobID jobid=new JobID("1014873536921",6);
TaskID tid=new TaskID(jobid,TaskType.MAP,0);
org.apache.hadoop.mapred.TaskID tid1=org.apache.hadoop.mapred.TaskID.downgrade(tid);
org.apache.hadoop.mapred.TaskReport treport=new org.apache.hadoop.mapred.TaskReport(tid1,0.0f,State.FAILED.toString(),null,TIPStatus.FAILED,100,100,new org.apache.hadoop.mapred.Counters());
// NOTE(review): arguments are (actual, expected) — JUnit convention is the
// reverse; only failure messages are affected, not pass/fail.
Assert.assertEquals(treport.getTaskId(),"task_1014873536921_0006_m_000000");
Assert.assertEquals(treport.getTaskID().toString(),"task_1014873536921_0006_m_000000");
}
InternalCallVerifier EqualityVerifier
/**
 * Test the case when a custom record delimiter is specified using the
 * textinputformat.record.delimiter configuration property.
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test public void testCustomRecordDelimiters() throws IOException, InterruptedException, ClassNotFoundException {
  Configuration jobConfig=new Configuration();
  // Records are terminated by tab+newline instead of the default newline.
  jobConfig.set("textinputformat.record.delimiter","\t\n");
  jobConfig.setInt("mapreduce.job.maps",1);
  FileSystem localFs=FileSystem.getLocal(jobConfig);
  localFs.delete(workDir,true);
  createInputFile(jobConfig);
  createAndRunJob(jobConfig);
  // Keys are byte offsets of each delimited record; values keep embedded '\n'.
  String expected="0\tabc\ndef\n9\tghi\njkl\n";
  assertEquals(expected,readOutputFile(jobConfig));
}
InternalCallVerifier EqualityVerifier
// Exercises LocalContainerLauncher.renameMapOutputForReduce() when the map
// output file and its index file land under two different local dirs.
@Test public void testRenameMapOutputForReduce() throws Exception {
final JobConf conf=new JobConf();
final MROutputFiles mrOutputFiles=new MROutputFiles();
mrOutputFiles.setConf(conf);
// Point LOCAL_DIR at a different root for each allocation so the output
// and its index end up in distinct parent directories.
conf.set(MRConfig.LOCAL_DIR,localDirs[0].toString());
final Path mapOut=mrOutputFiles.getOutputFileForWrite(1);
conf.set(MRConfig.LOCAL_DIR,localDirs[1].toString());
final Path mapOutIdx=mrOutputFiles.getOutputIndexFileForWrite(1);
Assert.assertNotEquals("Paths must be different!",mapOut.getParent(),mapOutIdx.getParent());
// Restore the full local-dir list and materialize both (empty) files.
conf.setStrings(MRConfig.LOCAL_DIR,localDirs);
final FileContext lfc=FileContext.getLocalFSFileContext(conf);
lfc.create(mapOut,EnumSet.of(CREATE)).close();
lfc.create(mapOutIdx,EnumSet.of(CREATE)).close();
final JobId jobId=MRBuilderUtils.newJobId(12345L,1,2);
final TaskId tid=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP);
final TaskAttemptId taid=MRBuilderUtils.newTaskAttemptId(tid,0);
// Must not throw even though the two files live under different roots.
LocalContainerLauncher.renameMapOutputForReduce(conf,taid,mrOutputFiles);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Runs a word-count job through the new (mapreduce) API in local mode and
// checks the aggregated output.
@Test public void testNewApis() throws Exception {
  // Random suffix keeps concurrent runs from colliding on the temp dir.
  Random r=new Random(System.currentTimeMillis());
  Path tmpBaseDir=new Path("/tmp/wc-" + r.nextInt());
  final Path inDir=new Path(tmpBaseDir,"input");
  final Path outDir=new Path(tmpBaseDir,"output");
  String input="The quick brown fox\nhas many silly\nred fox sox\n";
  FileSystem inFs=inDir.getFileSystem(conf);
  FileSystem outFs=outDir.getFileSystem(conf);
  outFs.delete(outDir,true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // Write the single input split; close even if writeBytes fails.
  DataOutputStream file=inFs.create(new Path(inDir,"part-0"));
  try {
    file.writeBytes(input);
  }
  finally {
    file.close();
  }
  Job job=Job.getInstance(conf,"word count");
  job.setJarByClass(TestLocalModeWithNewApis.class);
  job.setMapperClass(TokenizerMapper.class);
  job.setCombinerClass(IntSumReducer.class);
  job.setReducerClass(IntSumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job,inDir);
  FileOutputFormat.setOutputPath(job,outDir);
  // FIX: was assertEquals(job.waitForCompletion(true),true) — boolean result
  // compared via equals with actual/expected reversed.
  assertTrue(job.waitForCompletion(true));
  String output=readOutput(outDir,conf);
  assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n",output);
  outFs.delete(tmpBaseDir,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Exercises the deprecated DistributedCache mutator/accessor pairs, checking
// that every write is visible both through the raw Configuration key and
// through the typed getter.
@Test(timeout=1000) public void testDeprecatedFunctions() throws Exception {
// Local archives: add() appends to the comma-separated list, set() replaces it.
DistributedCache.addLocalArchives(conf,"Test Local Archives 1");
Assert.assertEquals("Test Local Archives 1",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 1",DistributedCache.getLocalCacheArchives(conf)[0].getName());
DistributedCache.addLocalArchives(conf,"Test Local Archives 2");
Assert.assertEquals("Test Local Archives 1,Test Local Archives 2",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(2,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 2",DistributedCache.getLocalCacheArchives(conf)[1].getName());
DistributedCache.setLocalArchives(conf,"Test Local Archives 3");
Assert.assertEquals("Test Local Archives 3",conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 3",DistributedCache.getLocalCacheArchives(conf)[0].getName());
// Local files follow the same add-appends / set-replaces contract.
DistributedCache.addLocalFiles(conf,"Test Local Files 1");
Assert.assertEquals("Test Local Files 1",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 1",DistributedCache.getLocalCacheFiles(conf)[0].getName());
DistributedCache.addLocalFiles(conf,"Test Local Files 2");
Assert.assertEquals("Test Local Files 1,Test Local Files 2",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(2,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 2",DistributedCache.getLocalCacheFiles(conf)[1].getName());
DistributedCache.setLocalFiles(conf,"Test Local Files 3");
Assert.assertEquals("Test Local Files 3",conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 3",DistributedCache.getLocalCacheFiles(conf)[0].getName());
// Timestamps are stored as comma-separated longs under the respective keys.
DistributedCache.setArchiveTimestamps(conf,"1234567890");
Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_ARCHIVES_TIMESTAMPS,0));
Assert.assertEquals(1,DistributedCache.getArchiveTimestamps(conf).length);
Assert.assertEquals(1234567890,DistributedCache.getArchiveTimestamps(conf)[0]);
DistributedCache.setFileTimestamps(conf,"1234567890");
Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_FILES_TIMESTAMPS,0));
Assert.assertEquals(1,DistributedCache.getFileTimestamps(conf).length);
Assert.assertEquals(1234567890,DistributedCache.getFileTimestamps(conf)[0]);
// createAllSymlink must not touch the symlink key, yet getSymlink reports true.
DistributedCache.createAllSymlink(conf,new File("Test Job Cache Dir"),new File("Test Work Dir"));
Assert.assertNull(conf.get(DistributedCache.CACHE_SYMLINK));
Assert.assertTrue(DistributedCache.getSymlink(conf));
// getTimestamp should agree with the FileStatus of a freshly created file.
Assert.assertTrue(symlinkFile.createNewFile());
FileStatus fileStatus=DistributedCache.getFileStatus(conf,symlinkFile.toURI());
Assert.assertNotNull(fileStatus);
Assert.assertEquals(fileStatus.getModificationTime(),DistributedCache.getTimestamp(conf,symlinkFile.toURI()));
Assert.assertTrue(symlinkFile.delete());
// Cache archives/files registered by URI round-trip through the getters.
DistributedCache.addCacheArchive(symlinkFile.toURI(),conf);
Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_ARCHIVES));
Assert.assertEquals(1,DistributedCache.getCacheArchives(conf).length);
Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheArchives(conf)[0]);
DistributedCache.addCacheFile(symlinkFile.toURI(),conf);
Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_FILES));
Assert.assertEquals(1,DistributedCache.getCacheFiles(conf).length);
Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheFiles(conf)[0]);
}
IterativeVerifier InternalCallVerifier BooleanVerifier
// Runs a local job whose keys are all NullWritable and verifies every input
// value survives the round trip exactly once.
@Test public void testNullKeys() throws Exception {
  JobConf conf=new JobConf(TestMapRed.class);
  FileSystem fs=FileSystem.getLocal(conf);
  // Build ten distinct values by successively replacing one character.
  // FIX: parameterized the raw HashSet (required for the for-each below).
  HashSet<String> values=new HashSet<String>();
  String m="AAAAAAAAAAAAAA";
  for (int i=1; i < 11; ++i) {
    values.add(m);
    m=m.replace((char)('A' + i - 1),(char)('A' + i));
  }
  Path testdir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(fs);
  fs.delete(testdir,true);
  Path inFile=new Path(testdir,"nullin/blah");
  SequenceFile.Writer w=SequenceFile.createWriter(fs,conf,inFile,NullWritable.class,Text.class,SequenceFile.CompressionType.NONE);
  Text t=new Text();
  for ( String s : values) {
    t.set(s);
    w.append(NullWritable.get(),t);
  }
  w.close();
  // Identity-style job with NullWritable keys, run in the local framework.
  FileInputFormat.setInputPaths(conf,inFile);
  FileOutputFormat.setOutputPath(conf,new Path(testdir,"nullout"));
  conf.setMapperClass(NullMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  conf.setOutputKeyClass(NullWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setInputFormat(SequenceFileInputFormat.class);
  conf.setOutputFormat(SequenceFileOutputFormat.class);
  conf.setNumReduceTasks(1);
  conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.LOCAL_FRAMEWORK_NAME);
  JobClient.runJob(conf);
  // FIX: close the reader (previously leaked) and drop the dead
  // recomputation of 'm', whose value was never used during verification.
  SequenceFile.Reader r=new SequenceFile.Reader(fs,new Path(testdir,"nullout/part-00000"),conf);
  try {
    while (r.next(NullWritable.get(),t)) {
      assertTrue("Unexpected value: " + t,values.remove(t.toString()));
    }
  }
  finally {
    r.close();
  }
  assertTrue("Missing values: " + values.toString(),values.isEmpty());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Checks Master.getMasterAddress for the default, classic and YARN
// frameworks, including rejection of a malformed classic master address.
@Test public void testGetMasterAddress(){
  YarnConfiguration conf=new YarnConfiguration();
  // Default framework: falls back to the default RM address.
  String masterHostname=Master.getMasterAddress(conf).getHostName();
  InetSocketAddress rmAddr=NetUtils.createSocketAddr(YarnConfiguration.DEFAULT_RM_ADDRESS);
  // FIX: expected value first, per JUnit convention (was reversed).
  assertEquals(rmAddr.getHostName(),masterHostname);
  conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.CLASSIC_FRAMEWORK_NAME);
  conf.set(MRConfig.MASTER_ADDRESS,"local:invalid");
  try {
    Master.getMasterAddress(conf);
    fail("Should not reach here as there is a bad master address");
  }
  catch ( Exception e) {
    // expected: a malformed classic master address must be rejected
  }
  conf.set(MRConfig.MASTER_ADDRESS,"bar.com:8042");
  masterHostname=Master.getMasterAddress(conf).getHostName();
  assertEquals("bar.com",masterHostname);
  conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_ADDRESS,"foo1.com:8192");
  masterHostname=Master.getMasterAddress(conf).getHostName();
  assertEquals("foo1.com",masterHostname);
}
InternalCallVerifier EqualityVerifier
// Checks that Master.getMasterUserName resolves the RM principal for the
// YARN framework (and by default) but the MR master user for classic.
@Test public void testGetMasterUser(){
  YarnConfiguration conf=new YarnConfiguration();
  conf.set(MRConfig.MASTER_USER_NAME,"foo");
  conf.set(YarnConfiguration.RM_PRINCIPAL,"bar");
  // FIX: expected value first, per JUnit convention (all three were reversed).
  assertEquals("bar",Master.getMasterUserName(conf));
  conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertEquals("foo",Master.getMasterUserName(conf));
  conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
  assertEquals("bar",Master.getMasterUserName(conf));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Runs a small job that uses an externally-defined Writable on a mini
// DFS/MR cluster and checks the aggregated output.
@Test public void testExternalWritable() throws IOException {
  MiniDFSCluster miniDfs=null;
  MiniMRCluster miniMr=null;
  FileSystem fileSys=null;
  try {
    final int taskTrackers=4;
    Configuration conf=new Configuration();
    miniDfs=new MiniDFSCluster.Builder(conf).build();
    fileSys=miniDfs.getFileSystem();
    String namenode=fileSys.getUri().toString();
    miniMr=new MiniMRCluster(taskTrackers,namenode,3);
    JobConf jobConf=miniMr.createJobConf();
    String result=launchExternal(fileSys.getUri(),jobConf,"Dennis was here!\nDennis again!",3,1);
    Assert.assertEquals("Dennis again!\t1\nDennis was here!\t1\n",result);
  }
  finally {
    // Always tear the mini clusters down, even on failure.
    if (miniDfs != null) {
      miniDfs.shutdown();
    }
    if (miniMr != null) {
      miniMr.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Runs word-count on a mini DFS/MR cluster to verify user classes are
// resolvable on the task classpath.
@Test public void testClassPath() throws IOException {
  MiniDFSCluster dfs=null;
  MiniMRCluster mr=null;
  FileSystem fileSys=null;
  try {
    final int taskTrackers=4;
    // FIX: removed the unused local 'jobTrackerPort' (dead code) and the
    // redundant pre-declared 'namenode' outside the try block.
    Configuration conf=new Configuration();
    dfs=new MiniDFSCluster.Builder(conf).build();
    fileSys=dfs.getFileSystem();
    String namenode=fileSys.getUri().toString();
    mr=new MiniMRCluster(taskTrackers,namenode,3);
    JobConf jobConf=mr.createJobConf();
    String result=launchWordCount(fileSys.getUri(),jobConf,"The quick brown fox\nhas many silly\n" + "red fox sox\n",3,1);
    Assert.assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n",result);
  }
  finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}
InternalCallVerifier EqualityVerifier
// Verifies that every externally visible RM / history-server address is
// preserved across a cluster restart.
@Test public void testRestart() throws Exception {
  // Keys checked in the same order as the original assertions.
  final String[] keys={YarnConfiguration.RM_ADDRESS,YarnConfiguration.RM_ADMIN_ADDRESS,YarnConfiguration.RM_SCHEDULER_ADDRESS,YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,YarnConfiguration.RM_WEBAPP_ADDRESS,JHAdminConfig.MR_HISTORY_ADDRESS,JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS};
  // Snapshot each address before the restart.
  String[] before=new String[keys.length];
  for (int i=0; i < keys.length; ++i) {
    before[i]=mrCluster.getConfig().get(keys[i]);
  }
  mrCluster.restart();
  // Every address must be unchanged afterwards.
  for (int i=0; i < keys.length; ++i) {
    String after=mrCluster.getConfig().get(keys[i]);
    assertEquals("Address before restart: " + before[i] + " is different from new address: "+ after,before[i],after);
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* test JobConf
* @throws Exception
*/
// Broad smoke test of the JobClient / NetworkedJob / ClusterStatus surface
// against a mini cluster with the capacity scheduler. Left byte-identical;
// review notes are inline.
@SuppressWarnings("deprecation") @Test(timeout=500000) public void testNetworkedJob() throws Exception {
MiniMRClientCluster mr=null;
FileSystem fileSys=null;
try {
mr=createMiniClusterWithCapacityScheduler();
JobConf job=new JobConf(mr.getConfig());
fileSys=FileSystem.get(job);
fileSys.delete(testDir,true);
// Stage a tiny input file for an identity map-only job.
FSDataOutputStream out=fileSys.create(inFile,true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job,inFile);
FileOutputFormat.setOutputPath(job,outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
JobClient client=new JobClient(mr.getConfig());
RunningJob rj=client.submitJob(job);
JobID jobId=rj.getID();
// Exercise the NetworkedJob accessors right after submission, while the
// job has made no progress yet.
// NOTE(review): most assertEquals calls below pass actual first, expected
// second — failure messages will read backwards.
NetworkedJob runningJob=(NetworkedJob)client.getJob(jobId);
runningJob.setJobPriority(JobPriority.HIGH.name());
assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml"));
assertEquals(runningJob.getID(),jobId);
assertEquals(runningJob.getJobID(),jobId.toString());
assertEquals(runningJob.getJobName(),"N/A");
assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml"));
assertTrue(runningJob.getTrackingURL().length() > 0);
assertTrue(runningJob.mapProgress() == 0.0f);
assertTrue(runningJob.reduceProgress() == 0.0f);
assertTrue(runningJob.cleanupProgress() == 0.0f);
assertTrue(runningJob.setupProgress() == 0.0f);
TaskCompletionEvent[] tce=runningJob.getTaskCompletionEvents(0);
assertEquals(tce.length,0);
assertEquals(runningJob.getHistoryUrl(),"");
assertFalse(runningJob.isRetired());
assertEquals(runningJob.getFailureInfo(),"");
assertEquals(runningJob.getJobStatus().getJobName(),"N/A");
assertEquals(client.getMapTaskReports(jobId).length,0);
// Setup/cleanup task types are not supported by the MR2 client.
try {
client.getSetupTaskReports(jobId);
}
catch ( YarnRuntimeException e) {
assertEquals(e.getMessage(),"Unrecognized task type: JOB_SETUP");
}
try {
client.getCleanupTaskReports(jobId);
}
catch ( YarnRuntimeException e) {
assertEquals(e.getMessage(),"Unrecognized task type: JOB_CLEANUP");
}
assertEquals(client.getReduceTaskReports(jobId).length,0);
// ClusterStatus snapshot plus a Writable round-trip of it.
ClusterStatus status=client.getClusterStatus(true);
assertEquals(status.getActiveTrackerNames().size(),2);
assertEquals(status.getBlacklistedTrackers(),0);
assertEquals(status.getBlacklistedTrackerNames().size(),0);
assertEquals(status.getBlackListedTrackersInfo().size(),0);
assertEquals(status.getJobTrackerStatus(),JobTrackerStatus.RUNNING);
assertEquals(status.getMapTasks(),1);
assertEquals(status.getMaxMapTasks(),20);
assertEquals(status.getMaxReduceTasks(),4);
assertEquals(status.getNumExcludedNodes(),0);
assertEquals(status.getReduceTasks(),1);
assertEquals(status.getTaskTrackers(),2);
assertEquals(status.getTTExpiryInterval(),0);
assertEquals(status.getJobTrackerStatus(),JobTrackerStatus.RUNNING);
assertEquals(status.getGraylistedTrackers(),0);
ByteArrayOutputStream dataOut=new ByteArrayOutputStream();
status.write(new DataOutputStream(dataOut));
ClusterStatus status2=new ClusterStatus();
status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray())));
assertEquals(status.getActiveTrackerNames(),status2.getActiveTrackerNames());
assertEquals(status.getBlackListedTrackersInfo(),status2.getBlackListedTrackersInfo());
assertEquals(status.getMapTasks(),status2.getMapTasks());
// NOTE(review): this try block is empty, so the catch below is dead code —
// presumably leftover from a removed scenario; candidate for deletion.
try {
}
catch ( RuntimeException e) {
assertTrue(e.getMessage().endsWith("not found on CLASSPATH"));
}
// Queue, filter and delegation-token accessors.
JobClient.setTaskOutputFilter(job,TaskStatusFilter.ALL);
assertEquals(JobClient.getTaskOutputFilter(job),TaskStatusFilter.ALL);
assertEquals(client.getDefaultMaps(),20);
assertEquals(client.getDefaultReduces(),4);
assertEquals(client.getSystemDir().getName(),"jobSubmitDir");
JobQueueInfo[] rootQueueInfo=client.getRootQueues();
assertEquals(rootQueueInfo.length,1);
assertEquals(rootQueueInfo[0].getQueueName(),"default");
JobQueueInfo[] qinfo=client.getQueues();
assertEquals(qinfo.length,1);
assertEquals(qinfo[0].getQueueName(),"default");
assertEquals(client.getChildQueues("default").length,0);
assertEquals(client.getJobsFromQueue("default").length,1);
assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml"));
JobQueueInfo qi=client.getQueueInfo("default");
assertEquals(qi.getQueueName(),"default");
assertEquals(qi.getQueueState(),"running");
QueueAclsInfo[] aai=client.getQueueAclsForCurrentUser();
assertEquals(aai.length,2);
assertEquals(aai[0].getQueueName(),"root");
assertEquals(aai[1].getQueueName(),"default");
Token token=client.getDelegationToken(new Text(UserGroupInformation.getCurrentUser().getShortUserName()));
assertEquals(token.getKind().toString(),"RM_DELEGATION_TOKEN");
assertEquals("Expected matching JobIDs",jobId,client.getJob(jobId).getJobStatus().getJobID());
assertEquals("Expected matching startTimes",rj.getJobStatus().getStartTime(),client.getJob(jobId).getJobStatus().getStartTime());
}
finally {
// Clean up the test dir and stop the mini cluster regardless of outcome.
if (fileSys != null) {
fileSys.delete(testDir,true);
}
if (mr != null) {
mr.stop();
}
}
}
InternalCallVerifier EqualityVerifier
/**
* test BlackListInfo class: accessors and Writable round-trip.
* @throws IOException
*/
@Test(timeout=5000) public void testBlackListInfo() throws IOException {
  BlackListInfo info=new BlackListInfo();
  info.setBlackListReport("blackListInfo");
  info.setReasonForBlackListing("reasonForBlackListing");
  info.setTrackerName("trackerName");
  // Serialize, then deserialize into a fresh instance.
  ByteArrayOutputStream byteOut=new ByteArrayOutputStream();
  DataOutput out=new DataOutputStream(byteOut);
  info.write(out);
  BlackListInfo info2=new BlackListInfo();
  info2.readFields(new DataInputStream(new ByteArrayInputStream(byteOut.toByteArray())));
  // FIX: the original compared 'info' with itself, so the deserialized copy
  // was never actually checked. Compare against the round-tripped instance.
  assertEquals(info,info2);
  assertEquals(info.toString(),info2.toString());
  // Accessors reflect the values set above (expected first, per convention).
  assertEquals("trackerName",info.getTrackerName());
  assertEquals("reasonForBlackListing",info.getReasonForBlackListing());
  assertEquals("blackListInfo",info.getBlackListReport());
}
InternalCallVerifier EqualityVerifier
// Submits an identity map-only job and checks that the status fetched back
// through the client matches the submitted job.
@Test(timeout=500000) public void testGetJobStatus() throws IOException, InterruptedException, ClassNotFoundException {
  MiniMRClientCluster miniCluster=null;
  FileSystem fs=null;
  try {
    miniCluster=createMiniClusterWithCapacityScheduler();
    JobConf jobConf=new JobConf(miniCluster.getConfig());
    fs=FileSystem.get(jobConf);
    fs.delete(testDir,true);
    // Stage a tiny input file.
    FSDataOutputStream stream=fs.create(inFile,true);
    stream.writeBytes("This is a test file");
    stream.close();
    FileInputFormat.setInputPaths(jobConf,inFile);
    FileOutputFormat.setOutputPath(jobConf,outDir);
    jobConf.setInputFormat(TextInputFormat.class);
    jobConf.setOutputFormat(TextOutputFormat.class);
    jobConf.setMapperClass(IdentityMapper.class);
    jobConf.setReducerClass(IdentityReducer.class);
    jobConf.setNumReduceTasks(0);
    JobClient client=new JobClient(miniCluster.getConfig());
    RunningJob submitted=client.submitJob(jobConf);
    JobID id=submitted.getID();
    assertEquals("Expected matching JobIDs",id,client.getJob(id).getJobStatus().getJobID());
    assertEquals("Expected matching startTimes",submitted.getJobStatus().getStartTime(),client.getJob(id).getJobStatus().getStartTime());
  }
  finally {
    if (fs != null) {
      fs.delete(testDir,true);
    }
    if (miniCluster != null) {
      miniCluster.stop();
    }
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Runs a local job with a combiner plus a custom combiner-key grouping
// comparator and checks that the combiner actually merged records per group.
@Test public void testCombiner() throws Exception {
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in=new File(TEST_ROOT_DIR,"input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out=new File(TEST_ROOT_DIR,"output");
  // Input lines are "group|member,count"; grouping keys on the part before '|'.
  PrintWriter pw=new PrintWriter(new FileWriter(new File(in,"data.txt")));
  pw.println("A|a,1");
  pw.println("A|b,2");
  pw.println("B|a,3");
  pw.println("B|b,4");
  pw.println("B|c,5");
  pw.close();
  JobConf job=new JobConf();
  job.set("mapreduce.framework.name","local");
  TextInputFormat.setInputPaths(job,new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job,new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormat(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormat(TextOutputFormat.class);
  job.setOutputValueGroupingComparator(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  job.setCombinerKeyGroupingComparator(GroupComparator.class);
  // Force the combiner to run even with very few spills.
  job.setInt("min.num.spills.for.combine",0);
  JobClient client=new JobClient(job);
  RunningJob runningJob=client.submitJob(job);
  runningJob.waitForCompletion();
  if (runningJob.isSuccessful()) {
    Counters counters=runningJob.getCounters();
    long combinerInputRecords=counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter").getCounter("COMBINE_INPUT_RECORDS");
    long combinerOutputRecords=counters.getGroup("org.apache.hadoop.mapreduce.TaskCounter").getCounter("COMBINE_OUTPUT_RECORDS");
    // The combiner must have run and must have shrunk the record stream.
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    // Exactly two output lines: one aggregate per group.
    BufferedReader br=new BufferedReader(new FileReader(new File(out,"part-00000")));
    // FIX: parameterized the raw Set/HashSet declarations.
    Set<String> output=new HashSet<String>();
    String line=br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0,1) + line.substring(4,5));
    line=br.readLine();
    Assert.assertNotNull(line);
    output.add(line.substring(0,1) + line.substring(4,5));
    line=br.readLine();
    Assert.assertNull(line);
    br.close();
    Set<String> expected=new HashSet<String>();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected,output);
  }
  else {
    Assert.fail("Job failed");
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* test Reporter.NULL: counters are absent, progress is zero, and the input
* split is unavailable.
*/
@Test(timeout=5000) public void testReporter(){
  Reporter nullReporter=Reporter.NULL;
  assertNull(nullReporter.getCounter(null));
  assertNull(nullReporter.getCounter("group","name"));
  // getInputSplit must throw; the original passed silently if it did not.
  try {
    nullReporter.getInputSplit();
    fail("Reporter.NULL.getInputSplit() should throw UnsupportedOperationException");
  }
  catch ( UnsupportedOperationException e) {
    assertEquals("NULL reporter has no input",e.getMessage());
  }
  assertEquals(0,nullReporter.getProgress(),0.01);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* test deprecated methods of TaskCompletionEvent
*/
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testTaskCompletionEvent(){
  TaskAttemptID taid=new TaskAttemptID("001",1,TaskType.REDUCE,2,3);
  TaskCompletionEvent template=new TaskCompletionEvent(12,taid,13,true,Status.SUCCEEDED,"httptracker");
  TaskCompletionEvent testEl=TaskCompletionEvent.downgrade(template);
  testEl.setTaskAttemptId(taid);
  testEl.setTaskTrackerHttp("httpTracker");
  // The leading zero in the attempt number is normalized away by the parser.
  testEl.setTaskId("attempt_001_0001_m_000002_04");
  assertEquals("attempt_001_0001_m_000002_4",testEl.getTaskId());
  testEl.setTaskStatus(Status.OBSOLETE);
  assertEquals(Status.OBSOLETE.toString(),testEl.getStatus().toString());
  testEl.setTaskRunTime(20);
  // FIX: expected value first, per JUnit convention (was reversed).
  assertEquals(20,testEl.getTaskRunTime());
  testEl.setEventId(16);
  assertEquals(16,testEl.getEventId());
}
InternalCallVerifier EqualityVerifier
/**
* Tests deprecated JobProfile methods, including a Writable round-trip.
* @throws IOException
*/
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testJobProfile() throws IOException {
  JobProfile profile=new JobProfile("user","job_001_03","jobFile","uri","name");
  // The job id string is normalized to a four-digit sequence number.
  assertEquals("job_001_0003",profile.getJobId());
  assertEquals("default",profile.getQueueName());
  // Serialize, then deserialize into a fresh instance and compare fields.
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  profile.write(new DataOutputStream(buffer));
  JobProfile restored=new JobProfile();
  restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(restored.name,profile.name);
  assertEquals(restored.jobFile,profile.jobFile);
  assertEquals(restored.queueName,profile.queueName);
  assertEquals(restored.url,profile.url);
  assertEquals(restored.user,profile.user);
}
InternalCallVerifier EqualityVerifier
/**
* Tests JobID serialization round-trip and the deprecated id pattern helper.
* @throws IOException
*/
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testJobID() throws IOException {
  JobID jid=new JobID("001",2);
  // Round-trip through Writable serialization.
  ByteArrayOutputStream buffer=new ByteArrayOutputStream();
  jid.write(new DataOutputStream(buffer));
  DataInputStream in=new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
  assertEquals(jid,JobID.read(in));
  assertEquals("job_001_0001",JobID.getJobIDsPattern("001",1));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* test deprecated methods of TaskID
* @throws IOException
*/
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testDepricatedMethods() throws IOException {
  JobID jid=new JobID();
  // The deprecated boolean flag maps true -> MAP and false -> REDUCE.
  // FIX: expected value first in every assertEquals (all were reversed).
  TaskID test=new TaskID(jid,true,1);
  assertEquals(TaskType.MAP,test.getTaskType());
  test=new TaskID(jid,false,1);
  assertEquals(TaskType.REDUCE,test.getTaskType());
  test=new TaskID("001",1,false,1);
  assertEquals(TaskType.REDUCE,test.getTaskType());
  test=new TaskID("001",1,true,1);
  assertEquals(TaskType.MAP,test.getTaskType());
  // Round-trip through Writable serialization.
  ByteArrayOutputStream out=new ByteArrayOutputStream();
  test.write(new DataOutputStream(out));
  TaskID ti=TaskID.read(new DataInputStream(new ByteArrayInputStream(out.toByteArray())));
  assertEquals(test.toString(),ti.toString());
  assertEquals("task_001_0001_m_000002",TaskID.getTaskIDsPattern("001",1,true,2));
  assertEquals("task_003_0001_m_000004",TaskID.getTaskIDsPattern("003",1,TaskType.MAP,4));
  assertEquals("003_0001_m_000004",TaskID.getTaskIDsPatternWOPrefix("003",1,TaskType.MAP,4).toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* test QueueManager
* configuration from file
* @throws IOException
*/
// End-to-end check of QueueManager loaded from an XML file: hierarchy, ACLs,
// state, refresh, JSON dump. Left byte-identical; review notes are inline.
// NOTE(review): many assertEquals calls below pass actual first, expected
// second, and several collection declarations are raw types.
@Test(timeout=5000) public void testQueue() throws IOException {
File f=null;
try {
f=writeFile();
QueueManager manager=new QueueManager(f.getCanonicalPath(),true);
manager.setSchedulerInfo("first","queueInfo");
manager.setSchedulerInfo("second","queueInfoqueueInfo");
// The file defines exactly two leaf queues under the root.
Queue root=manager.getRoot();
assertTrue(root.getChildren().size() == 2);
Iterator iterator=root.getChildren().iterator();
Queue firstSubQueue=iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue=iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(secondSubQueue.getProperties().getProperty("key"),"value");
assertEquals(secondSubQueue.getProperties().getProperty("key1"),"value1");
assertEquals(firstSubQueue.getState().getStateName(),"running");
assertEquals(secondSubQueue.getState().getStateName(),"stopped");
Set template=new HashSet();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(),template);
// ACL checks against a mocked user: user1/group1 may submit to "first"
// but not to "second", and may not administer; user3 may administer.
UserGroupInformation mockUGI=mock(UserGroupInformation.class);
when(mockUGI.getShortUserName()).thenReturn("user1");
String[] groups={"group1"};
when(mockUGI.getGroupNames()).thenReturn(groups);
assertTrue(manager.hasAccess("first",QueueACL.SUBMIT_JOB,mockUGI));
assertFalse(manager.hasAccess("second",QueueACL.SUBMIT_JOB,mockUGI));
assertFalse(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI));
when(mockUGI.getShortUserName()).thenReturn("user3");
assertTrue(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI));
QueueAclsInfo[] qai=manager.getQueueAcls(mockUGI);
assertEquals(qai.length,1);
// Refreshing from configuration must preserve identity, state and the
// scheduler info set earlier.
manager.refreshQueues(getConfiguration(),null);
iterator=root.getChildren().iterator();
Queue firstSubQueue1=iterator.next();
Queue secondSubQueue1=iterator.next();
assertTrue(firstSubQueue.equals(firstSubQueue1));
assertEquals(firstSubQueue1.getState().getStateName(),"running");
assertEquals(secondSubQueue1.getState().getStateName(),"stopped");
assertEquals(firstSubQueue1.getSchedulingInfo(),"queueInfo");
assertEquals(secondSubQueue1.getSchedulingInfo(),"queueInfoqueueInfo");
assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(),"first");
assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(),"running");
assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(),"queueInfo");
assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(),0);
assertEquals(manager.getSchedulerInfo("first"),"queueInfo");
// The JobQueueInfo view must cover exactly the root's children.
Set queueJobQueueInfos=new HashSet();
for ( JobQueueInfo jobInfo : manager.getJobQueueInfos()) {
queueJobQueueInfos.add(jobInfo.getQueueName());
}
Set rootJobQueueInfos=new HashSet();
for ( Queue queue : root.getChildren()) {
rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
}
assertEquals(queueJobQueueInfos,rootJobQueueInfos);
assertEquals(manager.getJobQueueInfoMapping().get("first").getQueueName(),"first");
// JSON dump from file-backed config, then from the plain Configuration.
Writer writer=new StringWriter();
Configuration conf=getConfiguration();
conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
QueueManager.dumpConfiguration(writer,f.getAbsolutePath(),conf);
String result=writer.toString();
assertTrue(result.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);
writer=new StringWriter();
QueueManager.dumpConfiguration(writer,conf);
result=writer.toString();
assertEquals("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",result);
// A default-constructed QueueAclsInfo has no queue name.
QueueAclsInfo qi=new QueueAclsInfo();
assertNull(qi.getQueueName());
}
finally {
// Remove the temporary queue-configuration file.
if (f != null) {
f.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* test for Qmanager with empty configuration
* @throws IOException
*/
@Test(timeout=5000) public void test2Queue() throws IOException {
  Configuration conf=getConfiguration();
  QueueManager manager=new QueueManager(conf);
  manager.setSchedulerInfo("first","queueInfo");
  manager.setSchedulerInfo("second","queueInfoqueueInfo");
  Queue root=manager.getRoot();
  // Exactly the two queues defined by getConfiguration() are expected.
  // FIX: assertEquals instead of assertTrue(size()==2) for a useful message;
  // expected values placed first throughout; raw Iterator/HashSet
  // parameterized.
  assertEquals(2,root.getChildren().size());
  Iterator<Queue> iterator=root.getChildren().iterator();
  Queue firstSubQueue=iterator.next();
  assertEquals("first",firstSubQueue.getName());
  assertEquals("Users [user1, user2] and members of the groups [group1, group2] are allowed",firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString());
  Queue secondSubQueue=iterator.next();
  assertEquals("second",secondSubQueue.getName());
  assertEquals("running",firstSubQueue.getState().getStateName());
  assertEquals("stopped",secondSubQueue.getState().getStateName());
  assertTrue(manager.isRunning("first"));
  assertFalse(manager.isRunning("second"));
  assertEquals("queueInfo",firstSubQueue.getSchedulingInfo());
  assertEquals("queueInfoqueueInfo",secondSubQueue.getSchedulingInfo());
  Set<String> template=new HashSet<String>();
  template.add("first");
  template.add("second");
  assertEquals(template,manager.getLeafQueueNames());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Runs a map-only job whose mapper exercises the task-status length limit
// and verifies the job still succeeds.
@Test public void testStatusLimit() throws IOException, InterruptedException, ClassNotFoundException {
  Path base=new Path(testRootTempDir,"testStatusLimit");
  Configuration conf=new Configuration();
  Path input=new Path(base,"in");
  Path output=new Path(base,"out");
  FileSystem fs=FileSystem.get(conf);
  // Start from a clean input directory containing one small file.
  if (fs.exists(input)) {
    fs.delete(input,true);
  }
  fs.mkdirs(input);
  DataOutputStream stream=fs.create(new Path(input,"part-" + 0));
  stream.writeBytes("testStatusLimit");
  stream.close();
  if (fs.exists(output)) {
    fs.delete(output,true);
  }
  Job job=Job.getInstance(conf,"testStatusLimit");
  job.setMapperClass(StatusLimitMapper.class);
  job.setNumReduceTasks(0);
  FileInputFormat.addInputPath(job,input);
  FileOutputFormat.setOutputPath(job,output);
  job.waitForCompletion(true);
  assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
* Test {@link Reporter}'s progress for map-reduce job.
*/
@Test public void testReporterProgressForMRJob() throws IOException {
  Path root=new Path(testRootTempDir,"testReporterProgressForMRJob");
  // One map and one reduce, a single attempt each, using the
  // progress-checking mapper and reducer.
  JobConf jobConf=new JobConf();
  jobConf.setMapperClass(ProgressTesterMapper.class);
  jobConf.setReducerClass(ProgressTestingReducer.class);
  jobConf.setMapOutputKeyClass(Text.class);
  jobConf.setMaxMapAttempts(1);
  jobConf.setMaxReduceAttempts(1);
  RunningJob running=UtilsForTests.runJob(jobConf,new Path(root,"in"),new Path(root,"out"),1,1,INPUT);
  running.waitForCompletion();
  assertTrue("Job failed",running.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link Reporter}'s progress for a map-only job.
 * This will make sure that only the map phase decides the attempt's progress.
 */
@SuppressWarnings("deprecation") @Test public void testReporterProgressForMapOnlyJob() throws IOException {
  Path testDir = new Path(testRootTempDir, "testReporterProgressForMapOnlyJob");
  JobConf jobConf = new JobConf();
  jobConf.setMapperClass(ProgressTesterMapper.class);
  jobConf.setMapOutputKeyClass(Text.class);
  // Single map attempt; zero reduce attempts because the job is map-only.
  jobConf.setMaxMapAttempts(1);
  jobConf.setMaxReduceAttempts(0);
  RunningJob runningJob = UtilsForTests.runJob(jobConf, new Path(testDir, "in"), new Path(testDir, "out"), 1, 0, INPUT);
  runningJob.waitForCompletion();
  assertTrue("Job failed", runningJob.isSuccessful());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that {@link ResourceMgrDelegate#getAllJobs()} maps YARN
 * application reports (FINISHED/FAILED, FINISHED/SUCCEEDED, FINISHED/KILLED,
 * and FAILED) to the corresponding {@link State} values.
 * NOTE(review): the method name is missing a 't' ("tesAllJobs"); kept as-is
 * so any external references to the test name keep working.
 */
@Test public void tesAllJobs() throws Exception {
  final ApplicationClientProtocol applicationsManager = Mockito.mock(ApplicationClientProtocol.class);
  GetApplicationsResponse allApplicationsResponse = Records.newRecord(GetApplicationsResponse.class);
  // Typed list (was a raw List) of the reports the mocked RM will return.
  List<ApplicationReport> applications = new ArrayList<ApplicationReport>();
  applications.add(getApplicationReport(YarnApplicationState.FINISHED, FinalApplicationStatus.FAILED));
  applications.add(getApplicationReport(YarnApplicationState.FINISHED, FinalApplicationStatus.SUCCEEDED));
  applications.add(getApplicationReport(YarnApplicationState.FINISHED, FinalApplicationStatus.KILLED));
  applications.add(getApplicationReport(YarnApplicationState.FAILED, FinalApplicationStatus.FAILED));
  allApplicationsResponse.setApplicationList(applications);
  Mockito.when(applicationsManager.getApplications(Mockito.any(GetApplicationsRequest.class))).thenReturn(allApplicationsResponse);
  ResourceMgrDelegate resourceMgrDelegate = new ResourceMgrDelegate(new YarnConfiguration()) {
    @Override protected void serviceStart() throws Exception {
      // Swap in the mocked RM protocol before the client is used.
      Assert.assertTrue(this.client instanceof YarnClientImpl);
      ((YarnClientImpl) this.client).setRMClient(applicationsManager);
    }
  };
  JobStatus[] allJobs = resourceMgrDelegate.getAllJobs();
  Assert.assertEquals(State.FAILED, allJobs[0].getState());
  Assert.assertEquals(State.SUCCEEDED, allJobs[1].getState());
  Assert.assertEquals(State.KILLED, allJobs[2].getState());
  Assert.assertEquals(State.FAILED, allJobs[3].getState());
}
InternalCallVerifier BooleanVerifier
/**
 * Tests that getRootQueues makes a request for the (recursive) child queues.
 * @throws IOException if the mocked RM call wrapping fails
 */
@Test public void testGetRootQueues() throws IOException, InterruptedException {
  final ApplicationClientProtocol applicationsManager = Mockito.mock(ApplicationClientProtocol.class);
  GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(response.getQueueInfo()).thenReturn(queueInfo);
  try {
    Mockito.when(applicationsManager.getQueueInfo(Mockito.any(GetQueueInfoRequest.class))).thenReturn(response);
  } catch (YarnException e) {
    throw new IOException(e);
  }
  ResourceMgrDelegate delegate = new ResourceMgrDelegate(new YarnConfiguration()) {
    @Override protected void serviceStart() throws Exception {
      // Swap in the mocked RM protocol before the client is used.
      Assert.assertTrue(this.client instanceof YarnClientImpl);
      ((YarnClientImpl) this.client).setRMClient(applicationsManager);
    }
  };
  delegate.getRootQueues();
  // Typed captor (was a raw ArgumentCaptor) so getValue() needs no cast.
  ArgumentCaptor<GetQueueInfoRequest> argument = ArgumentCaptor.forClass(GetQueueInfoRequest.class);
  try {
    Mockito.verify(applicationsManager).getQueueInfo(argument.capture());
  } catch (YarnException e) {
    throw new IOException(e);
  }
  Assert.assertTrue("Children of root queue not requested", argument.getValue().getIncludeChildQueues());
  Assert.assertTrue("Request wasn't to recurse through children", argument.getValue().getRecursive());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies shuffle-token recovery across ShuffleHandler restarts: a token
 * registered before a restart keeps authorizing shuffle requests as long as
 * a recovery path is set, and stops working once the application is stopped.
 */
@Test public void testRecovery() throws IOException {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(12345,1);
// jobId is derived here but not referenced again in this test.
final JobID jobId=JobID.downgrade(TypeConverter.fromYarn(appId));
final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
ShuffleHandler shuffle=new ShuffleHandler();
// Recovery state is persisted under tmpDir so a new handler can reload it.
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// Serialize a job token and register it with the running handler.
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
int rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Restart the handler: the token must be recovered from the recovery path.
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Stopping the application invalidates its token immediately...
shuffle.stopApplication(new ApplicationTerminationContext(appId));
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,rc);
// ...and it stays invalid across another restart.
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,rc);
}
finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * State-store schema version compatibility across ShuffleHandler restarts:
 * the current schema version is 1.0; a stored 1.1 (same major version) is
 * accepted and the handler restarts fine, while a stored 2.1 (different
 * major version) makes start() fail with a ServiceStateException.
 */
@Test public void testRecoveryFromOtherVersions() throws IOException {
final String user="someuser";
final ApplicationId appId=ApplicationId.newInstance(12345,1);
final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
ShuffleHandler shuffle=new ShuffleHandler();
// Recovery state is persisted under tmpDir so restarts can reload it.
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// Register a job token so shuffle requests can be authorized.
DataOutputBuffer outputBuffer=new DataOutputBuffer();
outputBuffer.reset();
Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
int rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
shuffle.close();
// Restart: recovered state still authorizes the token.
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
Version version=Version.newInstance(1,0);
Assert.assertEquals(version,shuffle.getCurrentVersion());
// Store a same-major (1.1) version in the state DB.
Version version11=Version.newInstance(1,1);
shuffle.storeVersion(version11);
Assert.assertEquals(version11,shuffle.loadVersion());
shuffle.close();
// A 1.1 store is compatible: the handler starts, and the stored version is
// back at the current 1.0 after the restart (per the assert below).
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
Assert.assertEquals(version,shuffle.loadVersion());
rc=getShuffleResponseCode(shuffle,jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
// Store an incompatible (different major) 2.1 version.
Version version21=Version.newInstance(2,1);
shuffle.storeVersion(version21);
Assert.assertEquals(version21,shuffle.loadVersion());
shuffle.close();
shuffle=new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
try {
shuffle.start();
Assert.fail("Incompatible version, should expect fail here.");
}
catch ( ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for state DB schema:"));
}
}
finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * simulate a reducer that sends an invalid shuffle-header - sometimes a wrong
 * header_name and sometimes a wrong version
 * @throws Exception exception
 */
@Test(timeout=10000) public void testIncompatibleShuffleVersion() throws Exception {
  final int attempts = 3;
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  ShuffleHandler handler = new ShuffleHandler();
  handler.init(conf);
  handler.start();
  URL url = new URL("http://127.0.0.1:" + handler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
  for (int attempt = 0; attempt < attempts; ++attempt) {
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    // Vary the header name / version across iterations so each request is
    // invalid in some way; every one must be rejected as a bad request.
    connection.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, attempt == 0 ? "mapreduce" : "other");
    connection.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION, attempt == 1 ? "1.0.0" : "1.0.1");
    connection.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, connection.getResponseCode());
  }
  handler.stop();
  handler.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verify client prematurely closing a connection.
 * @throws Exception exception.
 */
@Test(timeout=10000) public void testClientClosesConnection() throws Exception {
// Records an Error the first time sendError() fires; must stay empty.
final ArrayList failures=new ArrayList(1);
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
ShuffleHandler shuffleHandler=new ShuffleHandler(){
@Override protected Shuffle getShuffle( Configuration conf){
// Stubbed Shuffle: no request verification, canned headers, and a large
// multi-part payload so the client can close mid-transfer.
return new Shuffle(conf){
@Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
return null;
}
@Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
super.setResponseHeaders(response,keepAliveParam,100);
}
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
@Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
// Write the header many times over to produce a large body.
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
}
// Both sendError overloads count any invocation as a test failure.
@Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
}
;
}
}
;
shuffleHandler.init(conf);
shuffleHandler.start();
URL url=new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
Assert.assertEquals("close",conn.getHeaderField(HttpHeaders.CONNECTION));
// Read just the first shuffle header, then close the stream early while
// the server is still sending the rest of the payload.
ShuffleHeader header=new ShuffleHeader();
header.readFields(input);
input.close();
shuffleHandler.stop();
// The server must not have treated the premature close as an error.
Assert.assertTrue("sendError called when client closed connection",failures.size() == 0);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Keep-alive support: with SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED on, both a
 * plain request and one carrying keepAlive=true must answer with
 * "Connection: keep-alive", "Keep-Alive: timeout=1", HTTP 200, and a
 * readable shuffle header.
 */
@Test(timeout=10000) public void testKeepAlive() throws Exception {
// Records an Error the first time sendError() fires.
final ArrayList failures=new ArrayList(1);
Configuration conf=new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
conf.setBoolean(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED,true);
// NOTE(review): timeout is configured as -100 yet the responses below
// advertise "timeout=1" - presumably the handler applies a floor; confirm.
conf.setInt(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT,-100);
ShuffleHandler shuffleHandler=new ShuffleHandler(){
@Override protected Shuffle getShuffle( final Configuration conf){
// Stubbed Shuffle: skips request verification and serves a fixed payload.
return new Shuffle(conf){
@Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
return null;
}
@Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
}
@Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
// Repeat the header to build a realistic content length.
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
long contentLength=dob.getLength();
// When the request carries keepAlive=true, switch off the config-driven
// flag so the URL parameter alone governs the response headers.
if (keepAliveParam) {
connectionKeepAliveEnabled=false;
}
super.setResponseHeaders(response,keepAliveParam,contentLength);
}
@Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
HttpResponse response=new DefaultHttpResponse(HTTP_1_1,OK);
ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
DataOutputBuffer dob=new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
// Large body: the header written many times over.
dob=new DataOutputBuffer();
for (int i=0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
}
// Both sendError overloads count any invocation as a failure.
@Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
}
;
}
}
;
shuffleHandler.init(conf);
shuffleHandler.start();
String shuffleBaseURL="http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY);
// First request: keep-alive driven purely by the configuration flag.
URL url=new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&" + "map=attempt_12345_1_m_1_0");
HttpURLConnection conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpHeaders.KEEP_ALIVE,conn.getHeaderField(HttpHeaders.CONNECTION));
Assert.assertEquals("timeout=1",conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
ShuffleHeader header=new ShuffleHeader();
header.readFields(input);
input.close();
// Second request: keep-alive requested explicitly via the URL parameter.
url=new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&" + "map=attempt_12345_1_m_1_0&keepAlive=true");
conn=(HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
input=new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpHeaders.KEEP_ALIVE,conn.getHeaderField(HttpHeaders.CONNECTION));
Assert.assertEquals("timeout=1",conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
header=new ShuffleHeader();
header.readFields(input);
input.close();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a {@link TaskCheckpointID} stored via setCheckpointID() is
 * returned unchanged by getCheckpointID(), preserving checkpoint bytes,
 * checkpoint time and the partial-output paths.
 */
@Test public void testCheckpointIDTracking() throws IOException, InterruptedException {
  SystemClock clock = new SystemClock();
  org.apache.hadoop.mapreduce.v2.app.job.Task mockTask = mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
  Job mockJob = mock(Job.class);
  when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  when(appCtx.getClock()).thenReturn(clock);
  when(appCtx.getEventHandler()).thenReturn(ea);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {
    @Override protected void registerHeartbeatHandler(Configuration conf) {
      taskHeartbeatHandler = hbHandler;
    }
  };
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.TASK_PREEMPTION, true);
  listener.init(conf);
  listener.start();
  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
  // Typed list (was a raw List) of partial-output paths for the checkpoint.
  List<Path> partialOut = new ArrayList<Path>();
  partialOut.add(new Path("/prev1"));
  partialOut.add(new Path("/prev2"));
  Counters counters = mock(Counters.class);
  final long CBYTES = 64L * 1024 * 1024;
  final long CTIME = 4344L;
  final Path CLOC = new Path("/test/1");
  Counter cbytes = mock(Counter.class);
  when(cbytes.getValue()).thenReturn(CBYTES);
  Counter ctime = mock(Counter.class);
  when(ctime.getValue()).thenReturn(CTIME);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);
  TaskCheckpointID incid = new TaskCheckpointID(new FSCheckpointID(CLOC), partialOut, counters);
  listener.setCheckpointID(org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()), incid);
  CheckpointID outcid = listener.getCheckpointID(tid.getTaskID());
  TaskCheckpointID tcid = (TaskCheckpointID) outcid;
  assertEquals(CBYTES, tcid.getCheckpointBytes());
  assertEquals(CTIME, tcid.getCheckpointTime());
  // Same set of partial-output paths, regardless of order.
  assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
  assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
  // Was a bare "assert" statement, which is a no-op unless the JVM runs
  // with -ea; use a JUnit assertion so it is always checked.
  assertTrue("getCheckpointID should return the instance that was set", outcid == incid);
  listener.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Drives getTask() through the JVM/attempt life-cycle:
 * unknown JVM -> shouldDie; registered-but-not-launched -> no task yet;
 * launched -> the task is handed out once, then shouldDie on a re-ask;
 * unregistered -> shouldDie. Finally sanity-checks JVMId.forName() parsing.
 */
@Test(timeout=5000) public void testGetTask() throws IOException {
  AppContext appCtx = mock(AppContext.class);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  MockTaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler, policy);
  Configuration conf = new Configuration();
  listener.init(conf);
  listener.start();
  JVMId id = new JVMId("foo", 1, true, 1);
  WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
  JvmContext context = new JvmContext();
  context.jvmId = id;
  // Unknown JVM: the listener tells it to die.
  JvmTask result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  TaskAttemptId attemptID = mock(TaskAttemptId.class);
  Task task = mock(Task.class);
  // Pending (registered but not launched): no task is returned.
  listener.registerPendingTask(task, wid);
  result = listener.getTask(context);
  assertNull(result);
  listener.unregister(attemptID, wid);
  // Launched: registration reaches the heartbeat handler and the task is
  // handed out exactly once.
  listener.registerPendingTask(task, wid);
  listener.registerLaunchedTask(attemptID, wid);
  verify(hbHandler).register(attemptID);
  result = listener.getTask(context);
  assertNotNull(result);
  assertFalse(result.shouldDie);
  // Asking again for the same JVM: die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.unregister(attemptID, wid);
  // After unregistration: die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.stop();
  // JVMId parsing: a well-formed id parses, a malformed one throws.
  JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
  assertNotNull(jvmid);
  try {
    JVMId.forName("jvm_001_002_m_004_006");
    fail();
  } catch (IllegalArgumentException e) {
    // Fixed: expected value goes first in assertEquals (was swapped).
    assertEquals("TaskId string : jvm_001_002_m_004_006 is not properly formed", e.getMessage());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises getMapCompletionEvents(): the listener must hand back only the
 * map-attempt completion events the job reports for the requested window.
 */
@Test(timeout=10000) public void testGetMapCompletionEvents() throws IOException {
  TaskAttemptCompletionEvent[] noEvents = {};
  TaskAttemptCompletionEvent[] allEvents = {
      createTce(0, true, TaskAttemptCompletionEventStatus.OBSOLETE),
      createTce(1, false, TaskAttemptCompletionEventStatus.FAILED),
      createTce(2, true, TaskAttemptCompletionEventStatus.SUCCEEDED),
      createTce(3, false, TaskAttemptCompletionEventStatus.FAILED)};
  // Only two of the four events belong to map attempts.
  TaskAttemptCompletionEvent[] mapEvents = {allEvents[0], allEvents[2]};
  Job job = mock(Job.class);
  when(job.getTaskAttemptCompletionEvents(0, 100)).thenReturn(allEvents);
  when(job.getTaskAttemptCompletionEvents(0, 2)).thenReturn(Arrays.copyOfRange(allEvents, 0, 2));
  when(job.getTaskAttemptCompletionEvents(2, 100)).thenReturn(Arrays.copyOfRange(allEvents, 2, 4));
  when(job.getMapAttemptCompletionEvents(0, 100)).thenReturn(TypeConverter.fromYarn(mapEvents));
  when(job.getMapAttemptCompletionEvents(0, 2)).thenReturn(TypeConverter.fromYarn(mapEvents));
  when(job.getMapAttemptCompletionEvents(2, 100)).thenReturn(TypeConverter.fromYarn(noEvents));
  AppContext context = mock(AppContext.class);
  when(context.getJob(any(JobId.class))).thenReturn(job);
  JobTokenSecretManager tokenSecretManager = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler heartbeatHandler = mock(RMHeartbeatHandler.class);
  final TaskHeartbeatHandler taskHeartbeat = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler handler = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(handler);
  when(context.getEventHandler()).thenReturn(handler);
  CheckpointAMPreemptionPolicy preemptionPolicy = new CheckpointAMPreemptionPolicy();
  preemptionPolicy.init(context);
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(context, tokenSecretManager, heartbeatHandler, preemptionPolicy) {
    @Override protected void registerHeartbeatHandler(Configuration conf) {
      taskHeartbeatHandler = taskHeartbeat;
    }
  };
  Configuration configuration = new Configuration();
  listener.init(configuration);
  listener.start();
  JobID jobId = new JobID("12345", 1);
  TaskAttemptID reduceAttempt = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);
  MapTaskCompletionEventsUpdate update = listener.getMapCompletionEvents(jobId, 0, 100, reduceAttempt);
  assertEquals(2, update.events.length);
  update = listener.getMapCompletionEvents(jobId, 0, 2, reduceAttempt);
  assertEquals(2, update.events.length);
  update = listener.getMapCompletionEvents(jobId, 2, 100, reduceAttempt);
  assertEquals(0, update.events.length);
}
InternalCallVerifier BooleanVerifier
/**
 * statusUpdate() must report the task as found, and must notify the task
 * heartbeat handler's progressing() only when an actual TaskStatus is
 * supplied (not on a null status).
 */
@SuppressWarnings("rawtypes") @Test public void testStatusUpdateProgress() throws IOException, InterruptedException {
  AppContext context = mock(AppContext.class);
  JobTokenSecretManager tokenSecretManager = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeat = mock(RMHeartbeatHandler.class);
  TaskHeartbeatHandler taskHeartbeat = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler handler = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(handler);
  when(context.getEventHandler()).thenReturn(handler);
  CheckpointAMPreemptionPolicy preemptionPolicy = new CheckpointAMPreemptionPolicy();
  preemptionPolicy.init(context);
  MockTaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(context, tokenSecretManager, rmHeartbeat, taskHeartbeat, preemptionPolicy);
  Configuration configuration = new Configuration();
  listener.init(configuration);
  listener.start();
  JVMId jvmId = new JVMId("foo", 1, true, 1);
  WrappedJvmID wrappedJvmId = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap, jvmId.getId());
  TaskAttemptID attemptID = new TaskAttemptID("1", 1, TaskType.MAP, 1, 1);
  TaskAttemptId attemptId = TypeConverter.toYarn(attemptID);
  Task task = mock(Task.class);
  listener.registerPendingTask(task, wrappedJvmId);
  listener.registerLaunchedTask(attemptId, wrappedJvmId);
  verify(taskHeartbeat).register(attemptId);
  // A null status is acknowledged but does not count as progress.
  AMFeedback feedback = listener.statusUpdate(attemptID, null);
  assertTrue(feedback.getTaskFound());
  verify(taskHeartbeat, never()).progressing(eq(attemptId));
  // A real status update marks the attempt as progressing.
  MapTaskStatus mockStatus = new MapTaskStatus(attemptID, 0.0f, 1, TaskStatus.State.RUNNING, "", "RUNNING", "", TaskStatus.Phase.MAP, new Counters());
  feedback = listener.statusUpdate(attemptID, mockStatus);
  assertTrue(feedback.getTaskFound());
  verify(taskHeartbeat).progressing(eq(attemptId));
  listener.close();
}
InternalCallVerifier BooleanVerifier
/**
 * Commit window: canCommit() must be refused without consulting the task
 * while no recent RM heartbeat exists, and must delegate to the task once
 * the last heartbeat time is current.
 */
@Test(timeout=10000) public void testCommitWindow() throws IOException {
SystemClock clock=new SystemClock();
org.apache.hadoop.mapreduce.v2.app.job.Task mockTask=mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
// The task itself would always allow the commit.
when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
Job mockJob=mock(Job.class);
when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
AppContext appCtx=mock(AppContext.class);
when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
when(appCtx.getClock()).thenReturn(clock);
JobTokenSecretManager secret=mock(JobTokenSecretManager.class);
RMHeartbeatHandler rmHeartbeatHandler=mock(RMHeartbeatHandler.class);
final TaskHeartbeatHandler hbHandler=mock(TaskHeartbeatHandler.class);
Dispatcher dispatcher=mock(Dispatcher.class);
EventHandler ea=mock(EventHandler.class);
when(dispatcher.getEventHandler()).thenReturn(ea);
when(appCtx.getEventHandler()).thenReturn(ea);
CheckpointAMPreemptionPolicy policy=new CheckpointAMPreemptionPolicy();
policy.init(appCtx);
TaskAttemptListenerImpl listener=new MockTaskAttemptListenerImpl(appCtx,secret,rmHeartbeatHandler,policy){
@Override protected void registerHeartbeatHandler( Configuration conf){
taskHeartbeatHandler=hbHandler;
}
}
;
Configuration conf=new Configuration();
listener.init(conf);
listener.start();
TaskAttemptID tid=new TaskAttemptID("12345",1,TaskType.REDUCE,1,0);
// No heartbeat time stubbed yet (unstubbed mock returns 0): commit is
// refused and the task is never consulted.
boolean canCommit=listener.canCommit(tid);
assertFalse(canCommit);
verify(mockTask,never()).canCommit(any(TaskAttemptId.class));
// With a current heartbeat time the decision is delegated to the task.
when(rmHeartbeatHandler.getLastHeartbeatTime()).thenReturn(clock.getTime());
canCommit=listener.canCommit(tid);
assertTrue(canCommit);
verify(mockTask,times(1)).canCommit(any(TaskAttemptId.class));
listener.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test without TASK_LOG_DIR
 * @throws IOException
 */
@Test(timeout=50000) public void testTaskLogWithoutTaskLogDir() throws IOException {
  // With the container log dir property unset, no MRv2 log dir is reported.
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  // Was assertEquals(actual, null); assertNull states the intent directly.
  assertNull(TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test TaskAttemptID
 * @throws IOException
 */
@Test(timeout=50000) public void testTaskLog() throws IOException {
  // Point the MRv2 log dir at a known value for the path assertions below.
  System.setProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, "testString");
  // Expected value first (was swapped) so failure messages read correctly.
  assertEquals("testString", TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("testString" + File.separatorChar + "stdout"));
  // Make sure the index file exists before syncing logs into it.
  File indexFile = TaskLog.getIndexFile(taid, true);
  if (!indexFile.getParentFile().exists()) {
    indexFile.getParentFile().mkdirs();
  }
  indexFile.delete();
  indexFile.createNewFile();
  TaskLog.syncLogs("location", taid, true);
  assertTrue(indexFile.getAbsolutePath().endsWith("userlogs" + File.separatorChar + "job_job_0001" + File.separatorChar + "JobId.cleanup" + File.separatorChar + "log.index"));
  f = TaskLog.getRealTaskLogFileLocation(taid, true, LogName.DEBUGOUT);
  if (f != null) {
    assertTrue(f.getAbsolutePath().endsWith("location" + File.separatorChar + "debugout"));
    // Seed the debugout file so readTaskLog below has content.
    FileUtils.copyFile(indexFile, f);
  }
  assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
  assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT, taid, true).length() > 0);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test TaskLogAppender
 */
@SuppressWarnings("deprecation") @Test(timeout=5000) public void testTaskLogAppender() {
  TaskLogAppender appender = new TaskLogAppender();
  System.setProperty(TaskLogAppender.TASKID_PROPERTY, "attempt_01_02_m03_04_001");
  System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
  appender.activateOptions();
  // Expected values first (the original had expected/actual swapped), and
  // boolean checks expressed with assertFalse/assertTrue.
  assertEquals("attempt_01_02_m03_04_001", appender.getTaskId());
  assertEquals(1000, appender.getTotalLogFileSize());
  assertFalse(appender.getIsCleanup());
  // Append one event through a PatternLayout into a string writer.
  Writer writer = new StringWriter();
  appender.setWriter(writer);
  Layout layout = new PatternLayout("%-5p [%t]: %m%n");
  appender.setLayout(layout);
  Category logger = Logger.getLogger(getClass().getName());
  LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO, "message", new Throwable());
  appender.append(event);
  appender.flush();
  appender.close();
  // The formatted event must have been written out.
  assertTrue(writer.toString().length() > 0);
  // A fresh appender flagged as cleanup keeps that flag after activation.
  appender = new TaskLogAppender();
  appender.setIsCleanup(true);
  appender.activateOptions();
  assertTrue(appender.getIsCleanup());
}
InternalCallVerifier EqualityVerifier
/**
 * Checks interpolation in the PeriodicStatsAccumulator implementations:
 * CumulativePeriodicStats and StatePeriodicStats, each over 8 segments,
 * against hand-computed expected values.
 */
@Test public void testPeriodStatsets() {
  PeriodicStatsAccumulator cumulative = new CumulativePeriodicStats(8);
  PeriodicStatsAccumulator status = new StatePeriodicStats(8);
  cumulative.extend(0.0D, 0);
  cumulative.extend(0.4375D, 700);
  cumulative.extend(0.5625D, 1100);
  cumulative.extend(0.625D, 1300);
  cumulative.extend(1.0D, 7901);
  int[] results = cumulative.getValues();
  // Debug aid; the original printed the literal text "segment i" for every
  // entry - include the actual index. (Unused "total" local removed.)
  for (int i = 0; i < 8; ++i) {
    System.err.println("segment " + i + " = " + results[i]);
  }
  assertEquals("Bad interpolation in cumulative segment 0", 200, results[0]);
  assertEquals("Bad interpolation in cumulative segment 1", 200, results[1]);
  assertEquals("Bad interpolation in cumulative segment 2", 200, results[2]);
  assertEquals("Bad interpolation in cumulative segment 3", 300, results[3]);
  assertEquals("Bad interpolation in cumulative segment 4", 400, results[4]);
  assertEquals("Bad interpolation in cumulative segment 5", 2200, results[5]);
  assertEquals("Bad interpolation in cumulative segment 6", 2200, results[6]);
  assertEquals("Bad interpolation in cumulative segment 7", 2201, results[7]);
  status.extend(0.0D, 0);
  status.extend(1.0D / 16.0D, 300);
  status.extend(3.0D / 16.0D, 700);
  status.extend(7.0D / 16.0D, 2300);
  status.extend(1.0D, 1400);
  results = status.getValues();
  assertEquals("Bad interpolation in status segment 0", 275, results[0]);
  assertEquals("Bad interpolation in status segment 1", 750, results[1]);
  assertEquals("Bad interpolation in status segment 2", 1500, results[2]);
  assertEquals("Bad interpolation in status segment 3", 2175, results[3]);
  assertEquals("Bad interpolation in status segment 4", 2100, results[4]);
  assertEquals("Bad interpolation in status segment 5", 1900, results[5]);
  assertEquals("Bad interpolation in status segment 6", 1700, results[6]);
  assertEquals("Bad interpolation in status segment 7", 1500, results[7]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the {@link TaskStatus} against large sized task-diagnostic-info and
 * state-string. Does the following
 * - create Map/Reduce TaskStatus such that the task-diagnostic-info and
 * state-string are small strings and check their contents
 * - append them with small string and check their contents
 * - append them with large string and check their size
 * - update the status using statusUpdate() calls and check the size/contents
 * - create Map/Reduce TaskStatus with large string and check their size
 */
@Test public void testTaskDiagnosticsAndStateString(){
String test="hi";
final int maxSize=16;
// Anonymous subclass pins the maximum string size to a small value so the
// truncation behavior can be exercised with short fixture strings.
TaskStatus status=new TaskStatus(null,0,0,null,test,test,null,null,null){
@Override protected int getMaxStringSize(){
return maxSize;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
@Override public boolean getIsMap(){
return false;
}
}
;
// Small strings fit under maxSize and must be stored verbatim.
assertEquals("Small diagnostic info test failed",status.getDiagnosticInfo(),test);
assertEquals("Small state string test failed",status.getStateString(),test);
String newDInfo=test.concat(test);
// setDiagnosticInfo appends ("hi" + "hi"), setStateString replaces.
status.setDiagnosticInfo(test);
status.setStateString(newDInfo);
assertEquals("Small diagnostic info append failed",newDInfo,status.getDiagnosticInfo());
assertEquals("Small state-string append failed",newDInfo,status.getStateString());
// statusUpdate(TaskStatus) appends diagnostic info but replaces the
// state string with the incoming status' value.
TaskStatus newStatus=(TaskStatus)status.clone();
String newSInfo="hi1";
newStatus.setStateString(newSInfo);
status.statusUpdate(newStatus);
newDInfo=newDInfo.concat(newStatus.getDiagnosticInfo());
assertEquals("Status-update on diagnostic-info failed",newDInfo,status.getDiagnosticInfo());
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
newSInfo="hi2";
status.statusUpdate(0,newSInfo,null);
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
newSInfo="hi3";
status.statusUpdate(null,0,newSInfo,null,0);
assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
// Large (20-char) strings must be clipped down to maxSize (16) on every
// mutation path: setters, statusUpdate overloads, and the constructor.
String large="hihihihihihihihihihi";
status.setDiagnosticInfo(large);
status.setStateString(large);
assertEquals("Large diagnostic info append test failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Large state-string append test failed",maxSize,status.getStateString().length());
newStatus.setDiagnosticInfo(large + "0");
newStatus.setStateString(large + "1");
status.statusUpdate(newStatus);
assertEquals("Status-update on diagnostic info failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status.statusUpdate(0,large + "2",null);
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status.statusUpdate(null,0,large + "3",null,0);
assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
status=new TaskStatus(null,0,0,null,large,large,null,null,null){
@Override protected int getMaxStringSize(){
return maxSize;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
@Override public boolean getIsMap(){
return false;
}
}
;
assertEquals("Large diagnostic info test failed",maxSize,status.getDiagnosticInfo().length());
assertEquals("Large state-string test failed",maxSize,status.getStateString().length());
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Writes files of growing length, splits them into a random number of input
// splits, and verifies that every key 0..length-1 is read exactly once across
// all splits (no duplicates, no losses) regardless of where splits land.
@Test(timeout=500000) public void testFormat() throws Exception {
JobConf job=new JobConf(defaultConf);
Path file=new Path(workDir,"test.txt");
Reporter reporter=Reporter.NULL;
// Random seed is logged so a failing run can be reproduced.
int seed=new Random().nextInt();
LOG.info("seed = " + seed);
Random random=new Random(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) {
LOG.debug("creating; entries = " + length);
// One integer per line; the integers double as record identity below.
Writer writer=new OutputStreamWriter(localFs.create(file));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(job);
LongWritable key=new LongWritable();
Text value=new Text();
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 20) + 1;
LOG.debug("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(job,numSplits);
LOG.debug("splitting: got = " + splits.length);
if (length == 0) {
// Empty files still yield exactly one (zero-length) split.
assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length);
assertEquals("Empty file length == 0",0,splits[0].getLength());
}
// One bit per expected record; a set bit seen twice means a record
// was read by two different splits.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],job,reporter);
try {
int count=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ count);
}
finally {
reader.close();
}
}
// Every record must have been seen by exactly one split.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test readLine for correct interpretation of maxLineLength
 * (returned string should be clipped at maxLineLength, and the
 * remaining bytes on the same line should be thrown out).
 * Also check that returned value matches the string length.
 * Varies buffer size to stress test.
 * @throws Exception
 */
@Test(timeout=5000) public void testMaxLineLength() throws Exception {
final String STR="a\nbb\n\nccc\rdddd\r\neeeee";
final int STRLENBYTES=STR.getBytes().length;
Text out=new Text();
// Sweep every buffer size from 1 byte up to the full string length so that
// line boundaries land on every possible buffer-refill position.
for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) {
LineReader in=makeStream(STR,bufsz);
int c=0;
c+=in.readLine(out,1);
assertEquals("line1 length, bufsz: " + bufsz,1,out.getLength());
c+=in.readLine(out,1);
// "bb" is clipped to one character by maxLineLength=1.
assertEquals("line2 length, bufsz: " + bufsz,1,out.getLength());
c+=in.readLine(out,1);
assertEquals("line3 length, bufsz: " + bufsz,0,out.getLength());
c+=in.readLine(out,3);
assertEquals("line4 length, bufsz: " + bufsz,3,out.getLength());
c+=in.readLine(out,10);
assertEquals("line5 length, bufsz: " + bufsz,4,out.getLength());
c+=in.readLine(out,8);
// Fixed copy-paste error: this assertion is about line 6, not line 5.
assertEquals("line6 length, bufsz: " + bufsz,5,out.getLength());
assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out));
// Fixed argument order: JUnit's assertEquals is (message, expected,
// actual); the expected total is the byte length of STR.
assertEquals("total bytes, bufsz: " + bufsz,STRLENBYTES,c);
// Close the reader created for this buffer size (was leaked before).
in.close();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a zero-byte gzip-compressed input file still yields exactly
 * one split from getSplits() and that reading that split produces no records.
 */
@Test(timeout=5000) public void testGzipEmpty() throws IOException {
JobConf jobConf=new JobConf(defaultConf);
CompressionCodec gzipCodec=new GzipCodec();
ReflectionUtils.setConf(gzipCodec,jobConf);
localFs.delete(workDir,true);
// The file on disk is non-empty (gzip header/trailer) but decompresses
// to zero bytes.
writeFile(localFs,new Path(workDir,"empty.gz"),gzipCodec,"");
FileInputFormat.setInputPaths(jobConf,workDir);
TextInputFormat inputFormat=new TextInputFormat();
inputFormat.configure(jobConf);
InputSplit[] splits=inputFormat.getSplits(jobConf,100);
assertEquals("Compressed files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length);
List records=readSplit(inputFormat,splits[0],jobConf);
assertEquals("Compressed empty file length == 0",0,records.size());
}
InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Feeds LineRecordReader an endless stream of NUL bytes (one "infinite"
 * line) and verifies that the reader stops once the configured maximum
 * line length / split end is exceeded instead of reading forever. Both the
 * Configuration-based and the explicit-maxLineLength constructors are
 * exercised.
 */
@Test(timeout=5000) public void testMRMaxLine() throws Exception {
final int MAXPOS=1024 * 1024;
final int MAXLINE=10 * 1024;
final int BUF=64 * 1024;
// A stream that never ends and never emits a newline; the assertion in
// read(byte[]) fails the test if the reader consumes more than one
// buffer beyond MAXPOS.
final InputStream infNull=new InputStream(){
int position=0;
final int MAXPOSBUF=1024 * 1024 + BUF;
@Override public int read(){
++position;
return 0;
}
@Override public int read( byte[] b){
assertTrue("Read too many bytes from the stream",position < MAXPOSBUF);
Arrays.fill(b,(byte)0);
position+=b.length;
return b.length;
}
// Fixed: marked as an override of InputStream.reset() so the compiler
// flags any future signature drift.
@Override public void reset(){
position=0;
}
}
;
final LongWritable key=new LongWritable();
final Text val=new Text();
LOG.info("Reading a line from /dev/null");
final Configuration conf=new Configuration(false);
conf.setInt(org.apache.hadoop.mapreduce.lib.input.LineRecordReader.MAX_LINE_LENGTH,MAXLINE);
conf.setInt("io.file.buffer.size",BUF);
// Constructor that reads MAX_LINE_LENGTH from the Configuration.
LineRecordReader lrr=new LineRecordReader(infNull,0,MAXPOS,conf);
assertFalse("Read a line from null",lrr.next(key,val));
infNull.reset();
// Constructor that takes the max line length explicitly.
lrr=new LineRecordReader(infNull,0L,MAXLINE,MAXPOS);
assertFalse("Read a line from null",lrr.next(key,val));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that a splittable codec (BZip2) can be split mid-file and that all
// records are read exactly once across the resulting splits, for several file
// lengths and split counts.
@Test(timeout=900000) public void testSplitableCodecs() throws IOException {
JobConf conf=new JobConf(defaultConf);
int seed=new Random().nextInt();
// BZip2Codec is loaded reflectively; it implements SplittableCompressionCodec.
CompressionCodec codec=null;
try {
codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf);
}
catch ( ClassNotFoundException cnfe) {
throw new IOException("Illegal codec!");
}
Path file=new Path(workDir,"test" + codec.getDefaultExtension());
Reporter reporter=Reporter.NULL;
// Seed is logged so a failing run can be reproduced.
LOG.info("seed = " + seed);
Random random=new Random(seed);
FileSystem localFs=FileSystem.getLocal(conf);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(conf,workDir);
final int MAX_LENGTH=500000;
for (int length=MAX_LENGTH / 2; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) {
LOG.info("creating; entries = " + length);
// One integer per line, written through the compressor.
Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file)));
try {
for (int i=0; i < length; i++) {
writer.write(Integer.toString(i));
writer.write("\n");
}
}
finally {
writer.close();
}
TextInputFormat format=new TextInputFormat();
format.configure(conf);
LongWritable key=new LongWritable();
Text value=new Text();
for (int i=0; i < 3; i++) {
int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1;
LOG.info("splitting: requesting = " + numSplits);
InputSplit[] splits=format.getSplits(conf,numSplits);
LOG.info("splitting: got = " + splits.length);
// One bit per expected record; duplicates across splits fail below.
BitSet bits=new BitSet(length);
for (int j=0; j < splits.length; j++) {
LOG.debug("split[" + j + "]= "+ splits[j]);
RecordReader reader=format.getRecordReader(splits[j],conf,reporter);
try {
int counter=0;
while (reader.next(key,value)) {
int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
if (bits.get(v)) {
LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos());
}
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
counter++;
}
if (counter > 0) {
LOG.info("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
else {
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ counter);
}
}
finally {
reader.close();
}
}
// Every record must have been read exactly once.
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Test readLine for various kinds of line termination sequences.
 * Varies buffer size to stress test. Also check that returned
 * value matches the string length.
 * @throws Exception
 */
@Test(timeout=5000) public void testNewLines() throws Exception {
// Terminators covered: \n, \r, \r\n, runs of bare \r, and empty lines.
final String STR="a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee";
final int STRLENBYTES=STR.getBytes().length;
Text out=new Text();
// Sweep every buffer size so each terminator straddles every possible
// buffer-refill boundary at least once.
for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) {
LineReader in=makeStream(STR,bufsz);
int c=0;
c+=in.readLine(out);
assertEquals("line1 length, bufsz:" + bufsz,1,out.getLength());
c+=in.readLine(out);
assertEquals("line2 length, bufsz:" + bufsz,2,out.getLength());
c+=in.readLine(out);
assertEquals("line3 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line4 length, bufsz:" + bufsz,3,out.getLength());
c+=in.readLine(out);
assertEquals("line5 length, bufsz:" + bufsz,4,out.getLength());
// The \r\r\r\n\r\n run yields three empty lines.
c+=in.readLine(out);
assertEquals("line6 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line7 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line8 length, bufsz:" + bufsz,0,out.getLength());
c+=in.readLine(out);
assertEquals("line9 length, bufsz:" + bufsz,5,out.getLength());
assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out));
// Fixed argument order: JUnit's assertEquals is (message, expected,
// actual); the expected total is the byte length of STR.
assertEquals("total bytes, bufsz: " + bufsz,STRLENBYTES,c);
// Close the reader created for this buffer size (was leaked before).
in.close();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec for reading
 */
@Test(timeout=5000) public void testGzip() throws IOException {
JobConf job=new JobConf(defaultConf);
CompressionCodec gzip=new GzipCodec();
ReflectionUtils.setConf(gzip,job);
localFs.delete(workDir,true);
writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n");
FileInputFormat.setInputPaths(job,workDir);
TextInputFormat format=new TextInputFormat();
format.configure(job);
InputSplit[] splits=format.getSplits(job,100);
// Gzip is not splittable, so exactly one split per file.
assertEquals("compressed splits == 2",2,splits.length);
// getSplits() does not guarantee ordering; normalize so splits[0] is
// always part1 before asserting on contents.
FileSplit tmp=(FileSplit)splits[0];
if (tmp.getPath().getName().equals("part2.txt.gz")) {
splits[0]=splits[1];
splits[1]=tmp;
}
List results=readSplit(format,splits[0],job);
assertEquals("splits[0] length",6,results.size());
assertEquals("splits[0][5]"," dog",results.get(5).toString());
results=readSplit(format,splits[1],job);
assertEquals("splits[1] length",2,results.size());
assertEquals("splits[1][0]","this is a test",results.get(0).toString());
assertEquals("splits[1][1]","of gzip",results.get(1).toString());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that readLine neither corrupts multi-byte UTF-8 characters nor
 * treats non-newline Unicode whitespace as a line terminator.
 */
@Test(timeout=5000) public void testUTF8() throws Exception {
Text line=new Text();
// Euro signs (U+20AC, 3 bytes in UTF-8) must round-trip unchanged.
LineReader reader=makeStream("abcd\u20acbdcd\u20ac");
reader.readLine(line);
assertEquals("readLine changed utf8 characters","abcd\u20acbdcd\u20ac",line.toString());
// U+200A (hair space) must not be mistaken for a newline.
reader=makeStream("abc\u200axyz");
reader.readLine(line);
assertEquals("split on fake newline","abc\u200axyz",line.toString());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// NOTE(review): this test method is truncated in this source chunk -- the body
// ends abruptly at the bare "TextOutputFormat" token below and the method is
// never closed. Recover the full body from version control before editing;
// do not attempt to reconstruct it from the similar test that follows.
@Test public void testFormat() throws Exception {
JobConf job=new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID,attempt);
FileOutputFormat.setOutputPath(job,workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job,workDir);
FileSystem fs=workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file="test_format.txt";
Reporter reporter=Reporter.NULL;
TextOutputFormat
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies TextOutputFormat's handling of a custom key/value separator across
// every combination of null / NullWritable / real key and value: a null or
// NullWritable side is omitted, and the separator appears only when both
// sides are written.
@Test public void testFormatWithCustomSeparator() throws Exception {
JobConf job=new JobConf();
// U+0001 as separator instead of the default tab.
String separator="\u0001";
job.set("mapreduce.output.textoutputformat.separator",separator);
job.set(JobContext.TASK_ATTEMPT_ID,attempt);
FileOutputFormat.setOutputPath(job,workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job,workDir);
FileSystem fs=workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file="test_custom.txt";
Reporter reporter=Reporter.NULL;
TextOutputFormat theOutputFormat=new TextOutputFormat();
RecordWriter theRecordWriter=theOutputFormat.getRecordWriter(localFs,job,file,reporter);
Text key1=new Text("key1");
Text key2=new Text("key2");
Text val1=new Text("val1");
Text val2=new Text("val2");
NullWritable nullWritable=NullWritable.get();
try {
theRecordWriter.write(key1,val1);
// (null, NullWritable) writes nothing at all -- no line expected below.
theRecordWriter.write(null,nullWritable);
theRecordWriter.write(null,val1);
theRecordWriter.write(nullWritable,val2);
theRecordWriter.write(key2,nullWritable);
theRecordWriter.write(key1,null);
theRecordWriter.write(null,null);
theRecordWriter.write(key2,val2);
}
finally {
theRecordWriter.close(reporter);
}
File expectedFile=new File(new Path(workDir,file).toString());
// Expected output mirrors the writes above: suppressed sides produce no
// separator, fully-null writes produce no line.
StringBuffer expectedOutput=new StringBuffer();
expectedOutput.append(key1).append(separator).append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append(separator).append(val2).append("\n");
String output=UtilsForTests.slurp(expectedFile);
assertEquals(expectedOutput.toString(),output);
}
InternalCallVerifier BooleanVerifier
/**
 * OutputLogFilter must reject log paths while accepting both succeeded
 * marker paths and regular output paths.
 */
@Test public void testLogFilter(){
PathFilter logFilter=new Utils.OutputFileUtils.OutputLogFilter();
for ( Path logPath : LOG_PATHS) {
assertFalse(logFilter.accept(logPath));
}
for ( Path succeededPath : SUCCEEDED_PATHS) {
assertTrue(logFilter.accept(succeededPath));
}
for ( Path outputPath : PASS_PATHS) {
assertTrue(logFilter.accept(outputPath));
}
}
InternalCallVerifier BooleanVerifier
/**
 * OutputFilesFilter must reject both log paths and succeeded marker paths,
 * accepting only regular output paths.
 */
@Test public void testOutputFilesFilter(){
PathFilter outputFilter=new Utils.OutputFileUtils.OutputFilesFilter();
for ( Path logPath : LOG_PATHS) {
assertFalse(outputFilter.accept(logPath));
}
for ( Path succeededPath : SUCCEEDED_PATHS) {
assertFalse(outputFilter.accept(succeededPath));
}
for ( Path outputPath : PASS_PATHS) {
assertTrue(outputFilter.accept(outputPath));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that YARNRunner requests a history-server delegation token with
// the RM principal as the renewer. The mocked MRClientProtocol asserts the
// renewer from inside the stubbed answer and returns an empty token.
@Test(timeout=20000) public void testHistoryServerToken() throws Exception {
conf.set(YarnConfiguration.RM_PRINCIPAL,"foo@LOCAL");
final String masterPrincipal=Master.getMasterPrincipal(conf);
final MRClientProtocol hsProxy=mock(MRClientProtocol.class);
when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class))).thenAnswer(new Answer(){
public GetDelegationTokenResponse answer( InvocationOnMock invocation){
GetDelegationTokenRequest request=(GetDelegationTokenRequest)invocation.getArguments()[0];
// The renewer on the request must be the RM principal configured above.
assertEquals(masterPrincipal,request.getRenewer());
// Return a structurally valid but empty token.
org.apache.hadoop.yarn.api.records.Token token=recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Token.class);
token.setKind("");
token.setService("");
token.setIdentifier(ByteBuffer.allocate(0));
token.setPassword(ByteBuffer.allocate(0));
GetDelegationTokenResponse tokenResponse=recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
tokenResponse.setDelegationToken(token);
return tokenResponse;
}
}
);
// Run as a non-privileged remote user to exercise the doAs path.
UserGroupInformation.createRemoteUser("someone").doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
yarnRunner=new YARNRunner(conf,null,null);
yarnRunner.getDelegationTokenFromHS(hsProxy);
verify(hsProxy).getDelegationToken(any(GetDelegationTokenRequest.class));
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that the AM container environment merges the admin and user
// LD_LIBRARY_PATH settings (admin first, then user, appended to PWD) and
// that the configured admin SHELL is propagated.
@Test public void testAMStandardEnv() throws Exception {
final String ADMIN_LIB_PATH="foo";
final String USER_LIB_PATH="bar";
final String USER_SHELL="shell";
JobConf jobConf=new JobConf();
jobConf.set(MRJobConfig.MR_AM_ADMIN_USER_ENV,"LD_LIBRARY_PATH=" + ADMIN_LIB_PATH);
jobConf.set(MRJobConfig.MR_AM_ENV,"LD_LIBRARY_PATH=" + USER_LIB_PATH);
jobConf.set(MRJobConfig.MAPRED_ADMIN_USER_SHELL,USER_SHELL);
YARNRunner yarnRunner=new YARNRunner(jobConf);
ApplicationSubmissionContext appSubCtx=buildSubmitContext(yarnRunner,jobConf);
ContainerLaunchContext clc=appSubCtx.getAMContainerSpec();
Map env=clc.getEnvironment();
String libPath=env.get(Environment.LD_LIBRARY_PATH.name());
assertNotNull("LD_LIBRARY_PATH not set",libPath);
// Path separator depends on whether cross-platform submission is enabled.
String cps=jobConf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM) ? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator;
// Expected order: $PWD, admin path, user path.
assertEquals("Bad AM LD_LIBRARY_PATH setting",MRApps.crossPlatformifyMREnv(conf,Environment.PWD) + cps + ADMIN_LIB_PATH+ cps+ USER_LIB_PATH,libPath);
String shell=env.get(Environment.SHELL.name());
assertNotNull("SHELL not set",shell);
assertEquals("Bad SHELL setting",USER_SHELL,shell);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test if {@link CompressionEmulationUtil#configureCompressionEmulation(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.mapred.JobConf)}can extract compression related configuration parameters.
 */
@Test public void testExtractCompressionConfigs(){
JobConf source=new JobConf();
JobConf target=new JobConf();
// Phase 1: compression disabled in the source; the values should still be
// copied verbatim into the target, and input-compression emulation stays
// off because no compressed input path is configured.
source.setBoolean(FileOutputFormat.COMPRESS,false);
source.set(FileOutputFormat.COMPRESS_CODEC,"MyDefaultCodec");
source.set(FileOutputFormat.COMPRESS_TYPE,"MyDefaultType");
source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false);
source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyDefaultCodec2");
CompressionEmulationUtil.configureCompressionEmulation(source,target);
assertFalse(target.getBoolean(FileOutputFormat.COMPRESS,true));
assertEquals("MyDefaultCodec",target.get(FileOutputFormat.COMPRESS_CODEC));
assertEquals("MyDefaultType",target.get(FileOutputFormat.COMPRESS_TYPE));
assertFalse(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true));
assertEquals("MyDefaultCodec2",target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC));
assertFalse(CompressionEmulationUtil.isInputCompressionEmulationEnabled(target));
// Phase 2: compression enabled and a .gz input path set; extraction should
// copy the new values and turn input-compression emulation on.
source.setBoolean(FileOutputFormat.COMPRESS,true);
source.set(FileOutputFormat.COMPRESS_CODEC,"MyCodec");
source.set(FileOutputFormat.COMPRESS_TYPE,"MyType");
source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true);
source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyCodec2");
org.apache.hadoop.mapred.FileInputFormat.setInputPaths(source,"file.gz");
target=new JobConf();
CompressionEmulationUtil.configureCompressionEmulation(source,target);
assertTrue(target.getBoolean(FileOutputFormat.COMPRESS,false));
assertEquals("MyCodec",target.get(FileOutputFormat.COMPRESS_CODEC));
assertEquals("MyType",target.get(FileOutputFormat.COMPRESS_TYPE));
assertTrue(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false));
assertEquals("MyCodec2",target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC));
assertTrue(CompressionEmulationUtil.isInputCompressionEmulationEnabled(target));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test {@link CompressionEmulationUtil#getPossiblyDecompressedInputStream(Path,Configuration,long)}and{@link CompressionEmulationUtil#getPossiblyCompressedOutputStream(Path,Configuration)}.
 */
@Test public void testPossiblyCompressedDecompressedStreams() throws IOException {
JobConf conf=new JobConf();
FileSystem lfs=FileSystem.getLocal(conf);
String inputLine="Hi Hello!";
// Enable compression emulation end to end and pick gzip as the codec.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
conf.setBoolean(FileOutputFormat.COMPRESS,true);
conf.setClass(FileOutputFormat.COMPRESS_CODEC,GzipCodec.class,CompressionCodec.class);
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestPossiblyCompressedDecompressedStreams");
lfs.delete(tempDir,true);
// Write one line through the possibly-compressed output stream...
Path compressedFile=new Path(tempDir,"test");
OutputStream out=CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile,conf);
BufferedWriter writer=new BufferedWriter(new OutputStreamWriter(out));
writer.write(inputLine);
writer.close();
// ...the util appends the codec's extension, so read back the .gz file.
compressedFile=compressedFile.suffix(".gz");
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(compressedFile,conf,0);
BufferedReader reader=new BufferedReader(new InputStreamReader(in));
String readLine=reader.readLine();
assertEquals("Compression/Decompression error",inputLine,readLine);
reader.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test {@link RandomTextDataMapper} via {@link CompressionEmulationUtil}.
 */
@Test public void testRandomCompressedTextDataGenerator() throws Exception {
int wordSize=10;
int listSize=20;
long dataSize=10 * 1024 * 1024;
Configuration conf=new Configuration();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
// Generate random text from a 20-word vocabulary of 10-char words,
// targeting 10MB of (uncompressed) data.
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE,listSize);
conf.setInt(RandomTextDataGenerator.GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE,wordSize);
conf.setLong(GenerateData.GRIDMIX_GEN_BYTES,dataSize);
conf.set("mapreduce.job.hdfs-servers","");
FileSystem lfs=FileSystem.getLocal(conf);
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestRandomCompressedTextDataGenr");
lfs.delete(tempDir,true);
runDataGenJob(conf,tempDir);
FileStatus[] files=lfs.listStatus(tempDir,new Utils.OutputFileUtils.OutputFilesFilter());
// Sum the word bytes across all output files, tracking the longest line
// to bound the acceptable overshoot below.
long size=0;
long maxLineSize=0;
for ( FileStatus status : files) {
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(status.getPath(),conf,0);
BufferedReader reader=new BufferedReader(new InputStreamReader(in));
String line=reader.readLine();
if (line != null) {
long lineSize=line.getBytes().length;
if (lineSize > maxLineSize) {
maxLineSize=lineSize;
}
while (line != null) {
for ( String word : line.split("\\s")) {
size+=word.getBytes().length;
}
line=reader.readLine();
}
}
reader.close();
}
// Generated data must reach the target but overshoot by at most one line.
assertTrue(size >= dataSize);
assertTrue(size <= dataSize + maxLineSize);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test compressible {@link GridmixRecord}.
 */
@Test public void testCompressibleGridmixRecord() throws IOException {
JobConf conf=new JobConf();
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true);
FileSystem lfs=FileSystem.getLocal(conf);
int dataSize=1024 * 1024 * 10;
// Target compression ratio the record's payload should achieve.
float ratio=0.357F;
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory());
Path tempDir=new Path(rootTempDir,"TestPossiblyCompressibleGridmixRecord");
lfs.delete(tempDir,true);
GridmixRecord record=new GridmixRecord(dataSize,0);
record.setCompressibility(true,ratio);
conf.setClass(FileOutputFormat.COMPRESS_CODEC,GzipCodec.class,CompressionCodec.class);
org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf,true);
// Serialize the record through a gzip-compressed stream.
Path recordFile=new Path(tempDir,"record");
OutputStream outStream=CompressionEmulationUtil.getPossiblyCompressedOutputStream(recordFile,conf);
DataOutputStream out=new DataOutputStream(outStream);
record.write(out);
out.close();
outStream.close();
// The util appends the codec's extension to the written path.
Path actualRecordFile=recordFile.suffix(".gz");
InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(actualRecordFile,conf,0);
long compressedFileSize=lfs.listStatus(actualRecordFile)[0].getLen();
GridmixRecord recordRead=new GridmixRecord();
recordRead.readFields(new DataInputStream(in));
assertEquals("Record size mismatch in a compressible GridmixRecord",dataSize,recordRead.getSize());
assertTrue("Failed to generate a compressible GridmixRecord",recordRead.getSize() > compressedFileSize);
// Observed on-disk ratio must match the requested ratio within tolerance.
float seenRatio=((float)compressedFileSize) / dataSize;
assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio),CompressionEmulationUtil.standardizeCompressionRatio(seenRatio),1.0D);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test the configuration property for disabling/enabling emulation of
 * distributed cache load.
 */
@Test(timeout=2000) public void testDistCacheEmulationConfigurability() throws IOException {
Configuration jobConf=GridmixTestUtils.mrvl.getConfig();
Path ioPath=new Path("testDistCacheEmulationConfigurability").makeQualified(GridmixTestUtils.dfs.getUri(),GridmixTestUtils.dfs.getWorkingDirectory());
FileSystem fs=FileSystem.get(jobConf);
FileSystem.mkdirs(fs,ioPath,new FsPermission((short)0777));
// Default: emulation should be enabled out of the box.
dce=createDistributedCacheEmulator(jobConf,ioPath,false);
assertTrue("Default configuration of " + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE + " is wrong.",dce.shouldEmulateDistCacheLoad());
// Explicitly disabling the property must turn emulation off.
jobConf.setBoolean(DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE,false);
dce=createDistributedCacheEmulator(jobConf,ioPath,false);
assertFalse("Disabling of emulation of distributed cache load by setting " + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE + " to false is not working.",dce.shouldEmulateDistCacheLoad());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Validate GenerateDistCacheData job if it creates dist cache files properly.
 * @throws Exception
 */
@Test(timeout=200000) public void testGenerateDistCacheData() throws Exception {
// Sorted sizes of the 5 expected dist-cache files, filled in by setup.
long[] sortedFileSizes=new long[5];
Configuration jobConf=runSetupGenerateDistCacheData(true,sortedFileSizes);
GridmixJob gridmixJob=new GenerateDistCacheData(jobConf);
Job job=gridmixJob.call();
// The data-generation job is map-only.
assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",0,job.getNumReduceTasks());
assertTrue("GenerateDistCacheData job failed.",job.waitForCompletion(false));
// Verify the generated files match the expected sizes.
validateDistCacheData(jobConf,sortedFileSizes);
}
InternalCallVerifier EqualityVerifier
/**
 * test method configureDistCacheFiles
 */
@Test(timeout=2000) public void testDistCacheEmulator() throws Exception {
Configuration conf=new Configuration();
configureDummyDistCacheFiles(conf);
File ws=new File("target" + File.separator + this.getClass().getName());
Path ioPath=new Path(ws.getAbsolutePath());
DistributedCacheEmulator dce=new DistributedCacheEmulator(conf,ioPath);
JobConf jobConf=new JobConf(conf);
jobConf.setUser(UserGroupInformation.getCurrentUser().getShortUserName());
// Trace file driving the emulator (checked-in test resource).
File fin=new File("src" + File.separator + "test"+ File.separator+ "resources"+ File.separator+ "data"+ File.separator+ "wordcount.json");
dce.init(fin.getAbsolutePath(),JobCreator.LOADJOB,true);
dce.configureDistCacheFiles(conf,jobConf);
String[] caches=conf.getStrings(MRJobConfig.CACHE_FILES);
String[] tmpfiles=conf.getStrings("tmpfiles");
// Emulator may route files to either CACHE_FILES or tmpfiles; only the
// combined count is asserted.
assertEquals(6,((caches == null ? 0 : caches.length) + (tmpfiles == null ? 0 : tmpfiles.length)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises FilePool: with a minimum file size of 3KB the two smallest
// fixture files are excluded from the pool; with the minimum lowered to 0
// every file is included. The size expressions below assume the fixture
// created at `base` contains NFILES files whose sizes sum per the
// (NFILES/2 * (NFILES/2 + 1)) * 1024 formula -- see the suite's setup.
@Test public void testPool() throws Exception {
final Random r=new Random();
final Configuration conf=new Configuration();
conf.setLong(FilePool.GRIDMIX_MIN_FILE,3 * 1024);
final FilePool pool=new FilePool(conf,base);
pool.refresh();
final ArrayList files=new ArrayList();
// Total bytes of all files at least 3KB large; the "- 6" removes the
// 1KB and 2KB files (6KB combined, presumably 2+4 -- verify fixture).
final int expectedPoolSize=(NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024;
assertEquals(expectedPoolSize,pool.getInputFiles(Long.MAX_VALUE,files));
assertEquals(NFILES - 4,files.size());
files.clear();
// Asking for exactly the pool size returns exactly the pool size.
assertEquals(expectedPoolSize,pool.getInputFiles(expectedPoolSize,files));
files.clear();
// A random request may overshoot by at most the largest file in the pool.
final long rand=r.nextInt(expectedPoolSize);
assertTrue("Missed: " + rand,(NFILES / 2) * 1024 > rand - pool.getInputFiles(rand,files));
// With no minimum size, the full fixture byte count is available.
conf.setLong(FilePool.GRIDMIX_MIN_FILE,0);
pool.refresh();
files.clear();
assertEquals((NFILES / 2 * (NFILES / 2 + 1)) * 1024,pool.getInputFiles(Long.MAX_VALUE,files));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises LoadJob.LoadSortComparator on hand-built vint-encoded byte
// buffers: equal buffers compare 0, and mutating byte index 2 of the second
// buffer flips the ordering. NOTE(review): the exact meaning of the (s1,l1)
// offsets relative to the vint layout is defined by LoadSortComparator --
// confirm against its implementation before changing the expected values.
@Test(timeout=3000) public void testLoadJobLoadSortComparator() throws Exception {
LoadJob.LoadSortComparator test=new LoadJob.LoadSortComparator();
ByteArrayOutputStream data=new ByteArrayOutputStream();
DataOutputStream dos=new DataOutputStream(data);
WritableUtils.writeVInt(dos,2);
WritableUtils.writeVInt(dos,1);
WritableUtils.writeVInt(dos,4);
WritableUtils.writeVInt(dos,7);
WritableUtils.writeVInt(dos,4);
// b1 and b2 start as identical copies of the serialized vints.
byte[] b1=data.toByteArray();
byte[] b2=data.toByteArray();
assertEquals(0,test.compare(b1,0,1,b2,0,1));
b2[2]=5;
assertEquals(-1,test.compare(b1,0,1,b2,0,1));
b2[2]=2;
assertEquals(2,test.compare(b1,0,1,b2,0,1));
b2[2]=4;
// Different offsets into otherwise-equal data still order deterministically.
assertEquals(1,test.compare(b1,0,1,b2,1,1));
}
IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier
// Drives LoadJob.LoadRecordReader over a two-file CombineFileSplit backed by
// mocked file systems, verifying per-record progress (0.5 then 1.0), the
// fixed record size (1000), and the total record count (one per file).
@Test(timeout=3000) public void testLoadJobLoadRecordReader() throws Exception {
LoadJob.LoadRecordReader test=new LoadJob.LoadRecordReader();
Configuration conf=new Configuration();
// Each mocked Path resolves to a mocked FileSystem whose open() returns
// a fake stream, so no real I/O happens.
FileSystem fs1=mock(FileSystem.class);
when(fs1.open((Path)anyObject())).thenReturn(new FakeFSDataInputStream(new FakeInputStream()));
Path p1=mock(Path.class);
when(p1.getFileSystem((JobConf)anyObject())).thenReturn(fs1);
FileSystem fs2=mock(FileSystem.class);
when(fs2.open((Path)anyObject())).thenReturn(new FakeFSDataInputStream(new FakeInputStream()));
Path p2=mock(Path.class);
when(p2.getFileSystem((JobConf)anyObject())).thenReturn(fs2);
Path[] paths={p1,p2};
long[] start={0,0};
long[] lengths={1000,1000};
String[] locations={"temp1","temp2"};
CombineFileSplit cfsplit=new CombineFileSplit(paths,start,lengths,locations);
double[] reduceBytes={100,100};
double[] reduceRecords={2,2};
long[] reduceOutputBytes={500,500};
long[] reduceOutputRecords={2,2};
ResourceUsageMetrics metrics=new ResourceUsageMetrics();
ResourceUsageMetrics[] rMetrics={new ResourceUsageMetrics(),new ResourceUsageMetrics()};
LoadSplit input=new LoadSplit(cfsplit,2,3,1500L,2L,3000L,2L,reduceBytes,reduceRecords,reduceOutputBytes,reduceOutputRecords,metrics,rMetrics);
TaskAttemptID taskId=new TaskAttemptID();
TaskAttemptContext ctx=new TaskAttemptContextImpl(conf,taskId);
test.initialize(input,ctx);
GridmixRecord gr=test.getCurrentValue();
int counter=0;
while (test.nextKeyValue()) {
gr=test.getCurrentValue();
// Progress advances half the split per record: 0.5 after the first,
// 1.0 after the second.
if (counter == 0) {
assertEquals(0.5,test.getProgress(),0.001);
}
else if (counter == 1) {
assertEquals(1.0,test.getProgress(),0.001);
}
assertEquals(1000,gr.getSize());
counter++;
}
assertEquals(1000,gr.getSize());
// Exactly one record per backing file.
assertEquals(2,counter);
test.close();
}
InternalCallVerifier EqualityVerifier
/**
 * Round-trips a {@link GridmixSplit} through write()/readFields() and
 * verifies that the serialized fields survive the copy.
 */
@Test(timeout=1000) public void testGridmixSplit() throws Exception {
Path[] files={new Path("one"),new Path("two")};
long[] start={1,2};
long[] lengths={100,200};
String[] locations={"locOne","loctwo"};
CombineFileSplit cfSplit=new CombineFileSplit(files,start,lengths,locations);
// Per-reduce spec arrays consumed by the GridmixSplit constructor.
double[] reduceBytes={8.1d,8.2d};
double[] reduceRecords={9.1d,9.2d};
long[] reduceOutputBytes={101L,102L};
long[] reduceOutputRecords={111L,112L};
GridmixSplit test=new GridmixSplit(cfSplit,2,3,4L,5L,6L,7L,reduceBytes,reduceRecords,reduceOutputBytes,reduceOutputRecords);
// Serialize the split into an in-memory buffer ...
ByteArrayOutputStream data=new ByteArrayOutputStream();
DataOutputStream out=new DataOutputStream(data);
test.write(out);
// ... and deserialize it into a fresh instance.
GridmixSplit copy=new GridmixSplit();
copy.readFields(new DataInputStream(new ByteArrayInputStream(data.toByteArray())));
// Every persisted field must match the original.
assertEquals(test.getId(),copy.getId());
assertEquals(test.getMapCount(),copy.getMapCount());
assertEquals(test.getInputRecords(),copy.getInputRecords());
assertEquals(test.getOutputBytes()[0],copy.getOutputBytes()[0]);
assertEquals(test.getOutputRecords()[0],copy.getOutputRecords()[0]);
assertEquals(test.getReduceBytes(0),copy.getReduceBytes(0));
assertEquals(test.getReduceRecords(0),copy.getReduceRecords(0));
}
InternalCallVerifier EqualityVerifier
/**
 * Round-trips a {@link LoadSplit} through write()/readFields() and checks
 * that all fields, including map/reduce resource-usage metrics, survive.
 */
@Test(timeout=1000) public void testLoadSplit() throws Exception {
LoadSplit test=getLoadSplit();
// Serialize into an in-memory buffer, then read back into a new instance.
ByteArrayOutputStream data=new ByteArrayOutputStream();
DataOutputStream out=new DataOutputStream(data);
test.write(out);
LoadSplit copy=new LoadSplit();
copy.readFields(new DataInputStream(new ByteArrayInputStream(data.toByteArray())));
// Every persisted field must match the original.
assertEquals(test.getId(),copy.getId());
assertEquals(test.getMapCount(),copy.getMapCount());
assertEquals(test.getInputRecords(),copy.getInputRecords());
assertEquals(test.getOutputBytes()[0],copy.getOutputBytes()[0]);
assertEquals(test.getOutputRecords()[0],copy.getOutputRecords()[0]);
assertEquals(test.getReduceBytes(0),copy.getReduceBytes(0));
assertEquals(test.getReduceRecords(0),copy.getReduceRecords(0));
assertEquals(test.getMapResourceUsageMetrics().getCumulativeCpuUsage(),copy.getMapResourceUsageMetrics().getCumulativeCpuUsage());
assertEquals(test.getReduceResourceUsageMetrics(0).getCumulativeCpuUsage(),copy.getReduceResourceUsageMetrics(0).getCumulativeCpuUsage());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises {@link GridmixJob.SpecGroupingComparator} on both raw byte
 * buffers and {@link GridmixKey} instances.
 */
@Test(timeout=3000) public void testGridmixJobSpecGroupingComparator() throws Exception {
GridmixJob.SpecGroupingComparator test=new GridmixJob.SpecGroupingComparator();
// Build a small buffer of VInts; two identical copies to compare.
ByteArrayOutputStream data=new ByteArrayOutputStream();
DataOutputStream dos=new DataOutputStream(data);
WritableUtils.writeVInt(dos,2);
WritableUtils.writeVInt(dos,1);
WritableUtils.writeVInt(dos,0);
WritableUtils.writeVInt(dos,7);
WritableUtils.writeVInt(dos,4);
byte[] b1=data.toByteArray();
byte[] b2=data.toByteArray();
// Identical buffers compare equal.
assertEquals(0,test.compare(b1,0,1,b2,0,1));
b2[2]=1;
assertEquals(-1,test.compare(b1,0,1,b2,0,1));
// NOTE(review): b2[2] is re-assigned the same value and the same assertion
// repeated - possibly one of the pair was meant to use a different byte.
b2[2]=1;
assertEquals(-1,test.compare(b1,0,1,b2,0,1));
// Key-object comparisons: REDUCE_SPEC sorts before DATA; equal DATA keys
// of equal size compare 0; larger partition yields a positive result.
assertEquals(0,test.compare(new GridmixKey(GridmixKey.DATA,100,2),new GridmixKey(GridmixKey.DATA,100,2)));
assertEquals(-1,test.compare(new GridmixKey(GridmixKey.REDUCE_SPEC,100,2),new GridmixKey(GridmixKey.DATA,100,2)));
assertEquals(1,test.compare(new GridmixKey(GridmixKey.DATA,100,2),new GridmixKey(GridmixKey.REDUCE_SPEC,100,2)));
assertEquals(2,test.compare(new GridmixKey(GridmixKey.DATA,102,2),new GridmixKey(GridmixKey.DATA,100,2)));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies {@link GridmixJob} equality and ordering: jobs built from the
 * same story with the same sequence number are equal; differing sequence
 * numbers order the jobs.
 */
@Test(timeout=30000) public void testCompareGridmixJob() throws Exception {
Configuration conf=new Configuration();
Path outRoot=new Path("target");
// Mock a job story so all four jobs share name and configuration.
JobStory jobDesc=mock(JobStory.class);
when(jobDesc.getName()).thenReturn("JobName");
when(jobDesc.getJobConf()).thenReturn(new JobConf(conf));
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
// j1/j2 share seq 0; j3/j4 share seq 1.
GridmixJob j1=new LoadJob(conf,1000L,jobDesc,outRoot,ugi,0);
GridmixJob j2=new LoadJob(conf,1000L,jobDesc,outRoot,ugi,0);
GridmixJob j3=new LoadJob(conf,1000L,jobDesc,outRoot,ugi,1);
GridmixJob j4=new LoadJob(conf,1000L,jobDesc,outRoot,ugi,1);
assertTrue(j1.equals(j2));
assertEquals(0,j1.compareTo(j2));
assertFalse(j1.equals(j3));
assertEquals(-1,j1.compareTo(j3));
assertEquals(-1,j1.compareTo(j4));
}
InternalCallVerifier EqualityVerifier
/**
 * Test disabled task heap options configuration in {@link GridmixJob}.
 */
@Test @SuppressWarnings("deprecation") public void testJavaHeapOptionsDisabled() throws Exception {
Configuration gridmixConf=new Configuration();
gridmixConf.setBoolean(GridmixJob.GRIDMIX_TASK_JVM_OPTIONS_ENABLE,false);
// Heap options present in the gridmix configuration ...
gridmixConf.set(MRJobConfig.MAP_JAVA_OPTS,"-Xmx1m");
gridmixConf.set(MRJobConfig.REDUCE_JAVA_OPTS,"-Xmx2m");
gridmixConf.set(JobConf.MAPRED_TASK_JAVA_OPTS,"-Xmx3m");
// ... and different heap options in the original (trace) job conf.
final JobConf originalConf=new JobConf();
originalConf.set(MRJobConfig.MAP_JAVA_OPTS,"-Xmx10m");
originalConf.set(MRJobConfig.REDUCE_JAVA_OPTS,"-Xmx20m");
originalConf.set(JobConf.MAPRED_TASK_JAVA_OPTS,"-Xmx30m");
// Anonymous subclass pins getJobConf() to the original configuration.
MockJob story=new MockJob(originalConf){
public JobConf getJobConf(){
return originalConf;
}
}
;
GridmixJob job=new DummyGridmixJob(gridmixConf,story);
Job simulatedJob=job.getJob();
Configuration simulatedConf=simulatedJob.getConfiguration();
// With emulation disabled, the simulated job must keep the gridmix
// values (-Xmx1m/2m/3m), not inherit the original job's options.
assertEquals("Map heap options works when disabled!","-Xmx1m",simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
assertEquals("Reduce heap options works when disabled!","-Xmx2m",simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
assertEquals("Task heap options works when disabled!","-Xmx3m",simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link TotalHeapUsageEmulatorPlugin}.
 */
@Test public void testTotalHeapUsageEmulatorPlugin() throws Exception {
Configuration conf=new Configuration();
// Dummy monitor reporting a fixed 1GB max heap.
ResourceCalculatorPlugin monitor=new DummyResourceCalculatorPlugin();
long maxHeapUsage=1024 * TotalHeapUsageEmulatorPlugin.ONE_MB;
conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,maxHeapUsage);
monitor.setConf(conf);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F);
long targetHeapUsageInMB=200;
FakeProgressive fakeProgress=new FakeProgressive();
FakeHeapUsageEmulatorCore fakeCore=new FakeHeapUsageEmulatorCore();
FakeHeapUsageEmulatorPlugin heapPlugin=new FakeHeapUsageEmulatorPlugin(fakeCore);
// A zero target disables emulation: emulate() must be a no-op ...
ResourceUsageMetrics invalidUsage=TestResourceUsageEmulators.createMetrics(0);
heapPlugin.initialize(conf,invalidUsage,null,null);
int numCallsPre=fakeCore.getNumCalls();
long heapUsagePre=fakeCore.getHeapUsageInMB();
heapPlugin.emulate();
int numCallsPost=fakeCore.getNumCalls();
long heapUsagePost=fakeCore.getHeapUsageInMB();
assertEquals("Disabled heap usage emulation plugin works!",numCallsPre,numCallsPost);
assertEquals("Disabled heap usage emulation plugin works!",heapUsagePre,heapUsagePost);
// ... and progress must immediately report completion.
float progress=heapPlugin.getProgress();
assertEquals("Invalid progress of disabled cumulative heap usage emulation " + "plugin!",1.0f,progress,0f);
// A target beyond the monitored max heap must fail initialization.
Boolean failed=null;
invalidUsage=TestResourceUsageEmulators.createMetrics(maxHeapUsage + TotalHeapUsageEmulatorPlugin.ONE_MB);
try {
heapPlugin.initialize(conf,invalidUsage,monitor,null);
failed=false;
}
catch ( Exception e) {
failed=true;
}
assertNotNull("Fail case failure!",failed);
assertTrue("Expected failure!",failed);
// Accuracy checks under various load/free-ratio and interval settings.
ResourceUsageMetrics metrics=TestResourceUsageEmulators.createMetrics(targetHeapUsageInMB * TotalHeapUsageEmulatorPlugin.ONE_MB);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,10);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,0.2F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,5);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0.5F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,120,2);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,0.5F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,200,10);
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0.25F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,0.5F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,heapPlugin,162,6);
// Boundary checks: with a 0.25 interval, emulation should fire only at
// the 25%/80%/100% progress points and stay idle below 25%.
fakeProgress=new FakeProgressive();
conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO,0F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO,1F);
conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,0.25F);
heapPlugin.initialize(conf,metrics,monitor,fakeProgress);
fakeCore.resetFake();
long initHeapUsage=fakeCore.getHeapUsageInMB();
long initNumCallsUsage=fakeCore.getNumCalls();
testEmulationBoundary(0F,fakeCore,fakeProgress,heapPlugin,initHeapUsage,initNumCallsUsage,"[no-op, 0 progress]");
testEmulationBoundary(0.24F,fakeCore,fakeProgress,heapPlugin,initHeapUsage,initNumCallsUsage,"[no-op, 24% progress]");
testEmulationBoundary(0.25F,fakeCore,fakeProgress,heapPlugin,targetHeapUsageInMB / 4,1,"[op, 25% progress]");
testEmulationBoundary(0.80F,fakeCore,fakeProgress,heapPlugin,(targetHeapUsageInMB * 4) / 5,2,"[op, 80% progress]");
testEmulationBoundary(1F,fakeCore,fakeProgress,heapPlugin,targetHeapUsageInMB,3,"[op, 100% progress]");
}
InternalCallVerifier EqualityVerifier
/**
 * Test {@link TotalHeapUsageEmulatorPlugin}'s core heap usage emulation
 * engine.
 */
@Test public void testHeapUsageEmulator() throws IOException {
FakeHeapUsageEmulatorCore emulatorCore=new FakeHeapUsageEmulatorCore();
final long loadSizeInMB=10;
// Loading must grow the reported heap usage by exactly the requested amount.
long heapBefore=emulatorCore.getHeapUsageInMB();
emulatorCore.load(loadSizeInMB);
assertEquals("Default heap emulator failed to load 10mb",heapBefore + loadSizeInMB,emulatorCore.getHeapUsageInMB());
// Resetting must drop the fake heap usage back to zero.
emulatorCore.resetFake();
assertEquals("Default heap emulator failed to reset",0,emulatorCore.getHeapUsageInMB());
}
InternalCallVerifier EqualityVerifier
/**
 * Test {@link ClusterSummarizer}.
 */
@Test public void testClusterSummarizer() throws IOException {
ClusterSummarizer cs=new ClusterSummarizer();
Configuration conf=new Configuration();
// start() should capture the JT and NN addresses from the configuration.
String jt="test-jt:1234";
String nn="test-nn:5678";
conf.set(JTConfig.JT_IPC_ADDRESS,jt);
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,nn);
cs.start(conf);
assertEquals("JT name mismatch",jt,cs.getJobTrackerInfo());
assertEquals("NN name mismatch",nn,cs.getNamenodeInfo());
// Feed cluster status from a local job client and verify the summary.
ClusterStats cStats=ClusterStats.getClusterStats();
conf.set(JTConfig.JT_IPC_ADDRESS,"local");
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"local");
JobClient jc=new JobClient(conf);
cStats.setClusterMetric(jc.getClusterStatus());
cs.update(cStats);
assertEquals("Cluster summary test failed!",1,cs.getMaxMapTasks());
assertEquals("Cluster summary test failed!",1,cs.getMaxReduceTasks());
assertEquals("Cluster summary test failed!",1,cs.getNumActiveTrackers());
assertEquals("Cluster summary test failed!",0,cs.getNumBlacklistedTrackers());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link DataStatistics}.
 */
@Test public void testDataStatistics() throws Exception {
// Constructor should faithfully store size, file count and compression.
DataStatistics stats=new DataStatistics(10,2,true);
assertEquals("Data size mismatch",10,stats.getDataSize());
assertEquals("Num files mismatch",2,stats.getNumFiles());
assertTrue("Compression configuration mismatch",stats.isDataCompressed());
stats=new DataStatistics(100,5,false);
assertEquals("Data size mismatch",100,stats.getDataSize());
assertEquals("Num files mismatch",5,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// Set up a scratch directory for publishDataStatistics() scenarios.
Configuration conf=new Configuration();
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testDir=new Path(rootTempDir,"testDataStatistics");
FileSystem fs=testDir.getFileSystem(conf);
fs.delete(testDir,true);
Path testInputDir=new Path(testDir,"test");
fs.mkdirs(testInputDir);
// Compression emulation enabled on an empty dir must fail.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
Boolean failed=null;
try {
GenerateData.publishDataStatistics(testInputDir,1024L,conf);
failed=false;
}
catch ( RuntimeException e) {
failed=true;
}
assertNotNull("Expected failure!",failed);
assertTrue("Compression data publishing error",failed);
// Without compression emulation, an empty dir yields zero statistics.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
stats=GenerateData.publishDataStatistics(testInputDir,1024L,conf);
assertEquals("Data size mismatch",0,stats.getDataSize());
assertEquals("Num files mismatch",0,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// A plain (uncompressed) input file is counted when emulation is off ...
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
Path inputDataFile=new Path(testInputDir,"test");
long size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello bye").size();
stats=GenerateData.publishDataStatistics(testInputDir,-1,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
// ... but fails when compression emulation expects compressed input.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
failed=null;
try {
GenerateData.publishDataStatistics(testInputDir,1234L,conf);
failed=false;
}
catch ( RuntimeException e) {
failed=true;
}
assertNotNull("Expected failure!",failed);
assertTrue("Compression data publishing error",failed);
// A .gz input file works in both modes; compression flag follows the mode.
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false);
fs.delete(inputDataFile,false);
inputDataFile=new Path(testInputDir,"test.gz");
size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello").size();
stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertFalse("Compression configuration mismatch",stats.isDataCompressed());
CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true);
stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf);
assertEquals("Data size mismatch",size,stats.getDataSize());
assertEquals("Num files mismatch",1,stats.getNumFiles());
assertTrue("Compression configuration mismatch",stats.isDataCompressed());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link ExecutionSummarizer}.
 */
@Test @SuppressWarnings({"unchecked","rawtypes"}) public void testExecutionSummarizer() throws IOException {
Configuration conf=new Configuration();
// Default construction: no command-line args recorded.
ExecutionSummarizer es=new ExecutionSummarizer();
assertEquals("ExecutionSummarizer init failed",Summarizer.NA,es.getCommandLineArgsString());
// Arg-based construction: args are joined and start time is stamped.
long startTime=System.currentTimeMillis();
String[] initArgs=new String[]{"-Xmx20m","-Dtest.args='test'"};
es=new ExecutionSummarizer(initArgs);
assertEquals("ExecutionSummarizer init failed","-Xmx20m -Dtest.args='test'",es.getCommandLineArgsString());
assertTrue("Start time mismatch",es.getStartTime() >= startTime);
assertTrue("Start time mismatch",es.getStartTime() <= System.currentTimeMillis());
// A null update leaves the summarizer in its zeroed state.
es.update(null);
assertEquals("ExecutionSummarizer init failed",0,es.getSimulationStartTime());
testExecutionSummarizer(0,0,0,0,0,0,0,es);
long simStartTime=System.currentTimeMillis();
es.start(null);
assertTrue("Simulation start time mismatch",es.getSimulationStartTime() >= simStartTime);
assertTrue("Simulation start time mismatch",es.getSimulationStartTime() <= System.currentTimeMillis());
// Accumulate fake job stats and verify the running aggregates.
JobStats stats=generateFakeJobStats(1,10,true,false);
es.update(stats);
testExecutionSummarizer(1,10,0,1,1,0,0,es);
stats=generateFakeJobStats(5,1,false,false);
es.update(stats);
testExecutionSummarizer(6,11,0,2,1,1,0,es);
stats=generateFakeJobStats(1,1,true,true);
es.update(stats);
testExecutionSummarizer(7,12,0,3,1,1,1,es);
stats=generateFakeJobStats(2,2,false,true);
es.update(stats);
testExecutionSummarizer(9,14,0,4,1,1,2,es);
// finalize() with a real trace file: check trace metadata and signature.
JobFactory factory=new FakeJobFactory(conf);
factory.numJobsInTrace=3;
Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp"));
Path testDir=new Path(rootTempDir,"testGridmixSummary");
Path testTraceFile=new Path(testDir,"test-trace.json");
FileSystem fs=FileSystem.getLocal(conf);
fs.create(testTraceFile).close();
UserResolver resolver=new RoundRobinUserResolver();
DataStatistics dataStats=new DataStatistics(100,2,true);
String policy=GridmixJobSubmissionPolicy.REPLAY.name();
conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY,policy);
es.finalize(factory,testTraceFile.toString(),1024L,resolver,dataStats,conf);
assertEquals("Mismtach in num jobs in trace",3,es.getNumJobsInTrace());
String tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature());
Path qPath=fs.makeQualified(testTraceFile);
assertEquals("Mismatch in trace filename",qPath.toString(),es.getInputTraceLocation());
assertEquals("Mismatch in expected data size","1 K",es.getExpectedDataSize());
assertEquals("Mismatch in input data statistics",ExecutionSummarizer.stringifyDataStatistics(dataStats),es.getInputDataStatistics());
assertEquals("Mismatch in user resolver",resolver.getClass().getName(),es.getUserResolver());
assertEquals("Mismatch in policy",policy,es.getJobSubmissionPolicy());
// Data-size stringification: 10 * 1024^3 bytes reported as "10 G".
es.finalize(factory,testTraceFile.toString(),1024 * 1024 * 1024* 10L,resolver,dataStats,conf);
assertEquals("Mismatch in expected data size","10 G",es.getExpectedDataSize());
// Recreating the trace file must change its signature; sleep ensures a
// different modification timestamp.
fs.delete(testTraceFile,false);
try {
Thread.sleep(1000);
}
catch ( InterruptedException ie) {
}
fs.create(testTraceFile).close();
es.finalize(factory,testTraceFile.toString(),0L,resolver,dataStats,conf);
assertEquals("Mismatch in trace data size",Summarizer.NA,es.getExpectedDataSize());
assertFalse("Mismatch in trace signature",tid.equals(es.getInputTraceSignature()));
tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature());
// A different trace file yields yet another signature.
testTraceFile=new Path(testDir,"test-trace2.json");
fs.create(testTraceFile).close();
es.finalize(factory,testTraceFile.toString(),0L,resolver,dataStats,conf);
assertFalse("Mismatch in trace signature",tid.equals(es.getInputTraceSignature()));
tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString());
assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature());
// "-" (stdin trace) carries no signature or location.
es.finalize(factory,"-",0L,resolver,dataStats,conf);
assertEquals("Mismatch in trace signature",Summarizer.NA,es.getInputTraceSignature());
assertEquals("Mismatch in trace file location",Summarizer.NA,es.getInputTraceLocation());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test Pseudo Local File System methods like getFileStatus(), create(),
 * open(), exists() for valid file paths and invalid file paths.
 * @throws IOException
 */
@Test public void testPseudoLocalFsFileNames() throws IOException {
PseudoLocalFs pfs=new PseudoLocalFs();
Configuration conf=new Configuration();
conf.setClass("fs.pseudo.impl",PseudoLocalFs.class,FileSystem.class);
// A pseudo:// path should resolve to the pseudo local file system.
Path path=new Path("pseudo:///myPsedoFile.1234");
FileSystem testFs=path.getFileSystem(conf);
assertEquals("Failed to obtain a pseudo local file system object from path",pfs.getUri().getScheme(),testFs.getUri().getScheme());
// Wrong scheme: all operations expected to fail.
path=new Path("file:///myPsedoFile.12345");
validateGetFileStatus(pfs,path,false);
validateCreate(pfs,path,false);
validateOpen(pfs,path,false);
validateExists(pfs,path,false);
// Missing the numeric size suffix: expected to fail.
path=new Path("pseudo:///myPsedoFile");
validateGetFileStatus(pfs,path,false);
validateCreate(pfs,path,false);
validateOpen(pfs,path,false);
validateExists(pfs,path,false);
// Non-numeric suffix: expected to fail.
path=new Path("pseudo:///myPsedoFile.txt");
validateGetFileStatus(pfs,path,false);
validateCreate(pfs,path,false);
validateOpen(pfs,path,false);
validateExists(pfs,path,false);
// A generated path encodes the size and must validate and succeed.
long fileSize=231456;
path=PseudoLocalFs.generateFilePath("my.Psedo.File",fileSize);
assertEquals("generateFilePath() failed.",fileSize,pfs.validateFileNameFormat(path));
validateGetFileStatus(pfs,path,true);
validateCreate(pfs,path,true);
validateOpen(pfs,path,true);
validateExists(pfs,path,true);
// A relative name with a numeric suffix, qualified against pfs, works too.
path=new Path("myPsedoFile.1237");
path=path.makeQualified(pfs);
validateGetFileStatus(pfs,path,true);
validateCreate(pfs,path,true);
validateOpen(pfs,path,true);
validateExists(pfs,path,true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test if a file on PseudoLocalFs of a specific size can be opened and read.
 * Validate the size of the data read.
 * Test the read methods of {@link PseudoLocalFs.RandomInputStream}.
 * @throws Exception
 */
@Test public void testPseudoLocalFsFileSize() throws Exception {
long fileSize=10000;
Path path=PseudoLocalFs.generateFilePath("myPsedoFile",fileSize);
PseudoLocalFs pfs=new PseudoLocalFs();
pfs.create(path);
// Byte-at-a-time read: count bytes until EOF.
InputStream in=pfs.open(path,0);
long totalSize=0;
while (in.read() >= 0) {
++totalSize;
}
in.close();
assertEquals("File size mismatch with read().",fileSize,totalSize);
// Bulk read into a buffer: sum the chunk sizes until EOF.
in=pfs.open(path,0);
totalSize=0;
byte[] b=new byte[1024];
int bytesRead=in.read(b);
while (bytesRead >= 0) {
totalSize+=bytesRead;
bytesRead=in.read(b);
}
// Fix: the second stream was previously left open (resource leak).
in.close();
assertEquals("File size mismatch with read(byte[]).",fileSize,totalSize);
}
InternalCallVerifier EqualityVerifier
/**
 * Test if {@link RandomTextDataGenerator} can generate random words of
 * desired size.
 */
@Test public void testRandomTextDataGenerator(){
// 10 words, seed 0, word length 5.
RandomTextDataGenerator rtdg=new RandomTextDataGenerator(10,0L,5);
// Fix: use generics - the raw Set previously made the enhanced-for over
// String elements fail to compile (element type Object).
List<String> words=rtdg.getRandomWords();
assertEquals("List size mismatch",10,words.size());
// De-duplicate to verify all generated words are distinct.
Set<String> wordsSet=new HashSet<String>(words);
assertEquals("List size mismatch due to duplicates",10,wordsSet.size());
for ( String word : wordsSet) {
assertEquals("Word size mismatch",5,word.length());
}
}
InternalCallVerifier BooleanVerifier
/**
 * Test if {@link RandomTextDataGenerator} can generate different words given
 * different seeds.
 */
@Test public void testRandomTextDataGeneratorUniqueness(){
// Same list size and word length, different seeds (1 vs 0).
RandomTextDataGenerator rtdg1=new RandomTextDataGenerator(10,1L,5);
Set<String> words1=new HashSet<String>(rtdg1.getRandomWords());
RandomTextDataGenerator rtdg2=new RandomTextDataGenerator(10,0L,5);
Set<String> words2=new HashSet<String>(rtdg2.getRandomWords());
// Different seeds must produce different word sets.
assertFalse("List size mismatch across lists",words1.equals(words2));
}
InternalCallVerifier BooleanVerifier
/**
 * Test if {@link RandomTextDataGenerator} can generate same words given the
 * same list-size, word-length and seed.
 */
@Test public void testRandomTextDataGeneratorRepeatability(){
// Identical parameters (size 10, seed 0, length 5) on both generators.
RandomTextDataGenerator rtdg1=new RandomTextDataGenerator(10,0L,5);
List<String> words1=rtdg1.getRandomWords();
RandomTextDataGenerator rtdg2=new RandomTextDataGenerator(10,0L,5);
List<String> words2=rtdg2.getRandomWords();
// Same seed must reproduce the exact same word list.
assertTrue("List mismatch",words1.equals(words2));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test {@link LoadJob.ResourceUsageMatcherRunner}.
 */
@Test @SuppressWarnings("unchecked") public void testResourceUsageMatcherRunner() throws Exception {
Configuration conf=new Configuration();
FakeProgressive progress=new FakeProgressive();
// Configure the dummy resource calculator and a single test plugin.
conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,DummyResourceCalculatorPlugin.class,ResourceCalculatorPlugin.class);
conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestResourceUsageEmulatorPlugin.class,ResourceUsageEmulatorPlugin.class);
long currentTime=System.currentTimeMillis();
TaskAttemptID id=new TaskAttemptID("test",1,TaskType.MAP,1,1);
StatusReporter reporter=new DummyReporter(progress);
TaskInputOutputContext context=new MapContextImpl(conf,id,null,null,null,reporter,null);
// Constructing the runner should initialize the configured plugin.
FakeResourceUsageMatcherRunner matcher=new FakeResourceUsageMatcherRunner(context,null);
String identifier=TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
long initTime=TestResourceUsageEmulatorPlugin.testInitialization(identifier,conf);
assertTrue("ResourceUsageMatcherRunner failed to initialize the" + " configured plugin",initTime > currentTime);
assertEquals("Progress mismatch in ResourceUsageMatcherRunner",0,progress.getProgress(),0D);
// Advancing progress and running the matcher should trigger emulation.
progress.setProgress(0.01f);
currentTime=System.currentTimeMillis();
matcher.test();
long emulateTime=TestResourceUsageEmulatorPlugin.testEmulation(identifier,conf);
assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate" + " the configured plugin",emulateTime > currentTime);
}
InternalCallVerifier EqualityVerifier
/**
 * Test {@link CumulativeCpuUsageEmulatorPlugin}'s core CPU usage emulation
 * engine.
 */
@Test public void testCpuUsageEmulator() throws IOException {
long target=100000L;
int unitUsage=50;
FakeCpuUsageEmulatorCore fakeCpuEmulator=new FakeCpuUsageEmulatorCore();
fakeCpuEmulator.setUnitUsage(unitUsage);
// The fake monitor reports CPU time straight from the fake emulator core.
FakeResourceUsageMonitor fakeMonitor=new FakeResourceUsageMonitor(fakeCpuEmulator);
fakeCpuEmulator.calibrate(fakeMonitor,target);
// After calibration the fake core should report 100 CPU units in 2 calls.
assertEquals("Fake calibration failed",100,fakeMonitor.getCumulativeCpuTime());
assertEquals("Fake calibration failed",100,fakeCpuEmulator.getCpuUsage());
assertEquals("Fake calibration failed",2,fakeCpuEmulator.getNumCalls());
}
InternalCallVerifier BooleanVerifier
/**
 * Test {@link ResourceUsageMatcher}.
 */
@Test public void testResourceUsageMatcher() throws Exception {
ResourceUsageMatcher matcher=new ResourceUsageMatcher();
Configuration conf=new Configuration();
// Single configured plugin: configure() initializes it and
// matchResourceUsage() invokes its emulation.
conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestResourceUsageEmulatorPlugin.class,ResourceUsageEmulatorPlugin.class);
long currentTime=System.currentTimeMillis();
matcher.configure(conf,null,null,null);
matcher.matchResourceUsage();
String id=TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER;
long result=TestResourceUsageEmulatorPlugin.testInitialization(id,conf);
assertTrue("Resource usage matcher failed to initialize the configured" + " plugin",result > currentTime);
result=TestResourceUsageEmulatorPlugin.testEmulation(id,conf);
assertTrue("Resource usage matcher failed to load and emulate the" + " configured plugin",result > currentTime);
// Two plugins: initialization and emulation must preserve config order.
conf.setStrings(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestCpu.class.getName() + "," + TestOthers.class.getName());
matcher.configure(conf,null,null,null);
long time1=TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID,conf);
long time2=TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,conf);
assertTrue("Resource usage matcher failed to initialize the configured" + " plugins in order",time1 < time2);
matcher.matchResourceUsage();
time1=TestResourceUsageEmulatorPlugin.testInitialization(TestCpu.ID,conf);
time2=TestResourceUsageEmulatorPlugin.testInitialization(TestOthers.ID,conf);
assertTrue("Resource usage matcher failed to load the configured plugins",time1 < time2);
}
InternalCallVerifier EqualityVerifier
/**
 * Test {@link CumulativeCpuUsageEmulatorPlugin}.
 */
@Test public void testCumulativeCpuUsageEmulatorPlugin() throws Exception {
Configuration conf=new Configuration();
long targetCpuUsage=1000L;
int unitCpuUsage=50;
FakeProgressive fakeProgress=new FakeProgressive();
FakeCpuUsageEmulatorCore fakeCore=new FakeCpuUsageEmulatorCore();
fakeCore.setUnitUsage(unitCpuUsage);
CumulativeCpuUsageEmulatorPlugin cpuPlugin=new CumulativeCpuUsageEmulatorPlugin(fakeCore);
// A zero target disables emulation: emulate() must be a no-op ...
ResourceUsageMetrics invalidUsage=createMetrics(0);
cpuPlugin.initialize(conf,invalidUsage,null,null);
int numCallsPre=fakeCore.getNumCalls();
long cpuUsagePre=fakeCore.getCpuUsage();
cpuPlugin.emulate();
int numCallsPost=fakeCore.getNumCalls();
long cpuUsagePost=fakeCore.getCpuUsage();
assertEquals("Disabled cumulative CPU usage emulation plugin works!",numCallsPre,numCallsPost);
assertEquals("Disabled cumulative CPU usage emulation plugin works!",cpuUsagePre,cpuUsagePost);
// ... and progress must immediately report completion.
float progress=cpuPlugin.getProgress();
assertEquals("Invalid progress of disabled cumulative CPU usage emulation " + "plugin!",1.0f,progress,0f);
// Accuracy checks with and without an explicit progress interval.
ResourceUsageMetrics metrics=createMetrics(targetCpuUsage);
ResourceCalculatorPlugin monitor=new FakeResourceUsageMonitor(fakeCore);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,cpuPlugin,targetCpuUsage,targetCpuUsage / unitCpuUsage);
conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,0.2F);
testEmulationAccuracy(conf,fakeCore,monitor,metrics,cpuPlugin,targetCpuUsage,targetCpuUsage / unitCpuUsage);
// Boundary checks with a 0.25 interval and unit usage of 1.
fakeProgress=new FakeProgressive();
fakeCore.reset();
fakeCore.setUnitUsage(1);
conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,0.25F);
cpuPlugin.initialize(conf,metrics,monitor,fakeProgress);
long initCpuUsage=monitor.getCumulativeCpuTime();
long initNumCalls=fakeCore.getNumCalls();
testEmulationBoundary(0F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 0 progress]");
testEmulationBoundary(0.24F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 24% progress]");
testEmulationBoundary(0.25F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[op, 25% progress]");
testEmulationBoundary(0.80F,fakeCore,fakeProgress,cpuPlugin,410,410,"[op, 80% progress]");
testEmulationBoundary(1F,fakeCore,fakeProgress,cpuPlugin,targetCpuUsage,targetCpuUsage,"[op, 100% progress]");
// Boundary checks with a 0.40 interval and the original unit usage.
fakeProgress=new FakeProgressive();
fakeCore.reset();
fakeCore.setUnitUsage(unitCpuUsage);
conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,0.40F);
cpuPlugin.initialize(conf,metrics,monitor,fakeProgress);
initCpuUsage=monitor.getCumulativeCpuTime();
initNumCalls=fakeCore.getNumCalls();
testEmulationBoundary(0F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 0 progress]");
testEmulationBoundary(0.39F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 39% progress]");
testEmulationBoundary(0.40F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[op, 40% progress]");
testEmulationBoundary(0.90F,fakeCore,fakeProgress,cpuPlugin,700,700 / unitCpuUsage,"[op, 90% progress]");
testEmulationBoundary(1F,fakeCore,fakeProgress,cpuPlugin,targetCpuUsage,targetCpuUsage / unitCpuUsage,"[op, 100% progress]");
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * With {@code SLEEPJOB_MAPTASK_ONLY} enabled, every generated sleep job
 * must have zero reduce tasks; five jobs are produced from the debug
 * producer (seq ends at 6 after five increments from 1).
 */
@Test public void testMapTasksOnlySleepJobs() throws Exception {
Configuration configuration=GridmixTestUtils.mrvl.getConfig();
// Produce 5 fake job stories.
DebugJobProducer jobProducer=new DebugJobProducer(5,configuration);
configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY,true);
UserGroupInformation ugi=UserGroupInformation.getLoginUser();
JobStory story;
int seq=1;
while ((story=jobProducer.getNextJob()) != null) {
GridmixJob gridmixJob=JobCreator.SLEEPJOB.createGridmixJob(configuration,0,story,new Path("ignored"),ugi,seq++);
gridmixJob.buildSplits(null);
Job job=gridmixJob.call();
// Map-only mode: no reducers allowed.
assertEquals(0,job.getNumReduceTasks());
}
jobProducer.close();
assertEquals(6,seq);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * {@link SubmitterUserResolver} needs no target-user list and always maps
 * to the submitting user's own UGI.
 */
@Test public void testSubmitterResolver() throws Exception {
final UserResolver resolver=new SubmitterUserResolver();
// Submitter-based resolution never requires an explicit user list.
assertFalse(resolver.needsTargetUsersList());
final UserGroupInformation currentUser=UserGroupInformation.getCurrentUser();
// Regardless of the requested user (here: null), the submitter is returned.
assertEquals(currentUser,resolver.getTargetUgi((UserGroupInformation)null));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * {@code getAssignedJobID()} should be null before a backing mapreduce job
 * is set and should reflect (and delegate to) the backing job's ID after.
 */
@Test(timeout=30000) public void testGetAssignedJobId() throws Exception {
JobConf conf=new JobConf();
Job controlled=new Job(conf);
// No backing mapreduce job yet, so no assigned ID.
assertNull(controlled.getAssignedJobID());
// Back the control job with a mock whose ID is fixed.
org.apache.hadoop.mapreduce.JobID mockId=new org.apache.hadoop.mapreduce.JobID("test",0);
org.apache.hadoop.mapreduce.Job backing=mock(org.apache.hadoop.mapreduce.Job.class);
when(backing.getJobID()).thenReturn(mockId);
controlled.setJob(backing);
// The old-API JobID must match and must have been fetched via the mock.
assertEquals(new JobID("test",0),controlled.getAssignedJobID());
verify(backing).getJobID();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a depending job can be added to a job that is still in
 * the {@code Job.WAITING} state.
 * @throws Exception if job setup fails
 */
@Test(timeout=30000) public void testAddingDependingJob() throws Exception {
Job job_1=getCopyJob();
// Typed list instead of the original raw ArrayList (unchecked warning).
ArrayList<Job> dependingJobs=new ArrayList<Job>();
JobControl jc=new JobControl("Test");
jc.addJob(job_1);
// A freshly added job must be WAITING; only then may dependencies be added.
Assert.assertEquals(Job.WAITING,job_1.getState());
Assert.assertTrue(job_1.addDependingJob(new Job(job_1.getJobConf(),dependingJobs)));
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises the deprecated state/ID mutators: once the job is managed
 * by JobControl, {@code setState} and {@code setMapredJobID} appear to
 * be ignored, so the original values are asserted after each call.
 * @throws Exception if job setup fails
 */
@SuppressWarnings("deprecation") @Test(timeout=30000) public void testJobState() throws Exception {
Job job_1=getCopyJob();
JobControl jc=new JobControl("Test");
jc.addJob(job_1);
Assert.assertEquals(Job.WAITING,job_1.getState());
// Deprecated setter: the state must remain WAITING.
job_1.setState(Job.SUCCESS);
Assert.assertEquals(Job.WAITING,job_1.getState());
org.apache.hadoop.mapreduce.Job mockjob=mock(org.apache.hadoop.mapreduce.Job.class);
org.apache.hadoop.mapreduce.JobID jid=new org.apache.hadoop.mapreduce.JobID("test",0);
when(mockjob.getJobID()).thenReturn(jid);
job_1.setJob(mockjob);
Assert.assertEquals("job_test_0000",job_1.getMapredJobID());
// Deprecated setter: the mapred job ID must remain unchanged.
job_1.setMapredJobID("job_test_0001");
Assert.assertEquals("job_test_0000",job_1.getMapredJobID());
jc.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * Chain.setReducer invoked with byValue=false must record
 * "chain.reducer.byValue" as false in the reducer configuration.
 */
@Test public void testSetReducerWithReducerByValueAsFalse() throws Exception {
JobConf mainConf=new JobConf();
JobConf confForReducer=new JobConf();
Chain.setReducer(mainConf,MyReducer.class,Object.class,Object.class,Object.class,Object.class,false,confForReducer);
boolean byValueFlag=confForReducer.getBoolean("chain.reducer.byValue",true);
Assert.assertEquals("It should set chain.reducer.byValue as false " + "in reducerConf when we give value as false",false,byValueFlag);
}
InternalCallVerifier EqualityVerifier
/**
 * Chain.setReducer invoked with byValue=true must record
 * "chain.reducer.byValue" as true in the reducer configuration.
 */
@Test public void testSetReducerWithReducerByValueAsTrue() throws Exception {
JobConf mainConf=new JobConf();
JobConf confForReducer=new JobConf();
Chain.setReducer(mainConf,MyReducer.class,Object.class,Object.class,Object.class,Object.class,true,confForReducer);
boolean byValueFlag=confForReducer.getBoolean("chain.reducer.byValue",false);
Assert.assertEquals("It should set chain.reducer.byValue as true " + "in reducerConf when we give value as true",true,byValueFlag);
}
InternalCallVerifier EqualityVerifier
/**
 * Tests that key-field-based partitioning works with an empty key:
 * an empty key must always land in partition 0.
 */
@Test public void testEmptyKey() throws Exception {
KeyFieldBasedPartitioner kfbp=new KeyFieldBasedPartitioner();
JobConf conf=new JobConf();
// Partition on the first 10 key fields (the empty key has none).
conf.setInt("num.key.fields.for.partition",10);
kfbp.configure(conf);
assertEquals("Empty key should map to 0th partition",0,kfbp.getPartition(new Text(),new Text(),10));
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that calling configure() after setConf() is idempotent: the
 * same key/value pair must map to the same partition both times.
 */
@Test public void testMultiConfigure(){
KeyFieldBasedPartitioner kfbp=new KeyFieldBasedPartitioner();
JobConf conf=new JobConf();
// Partition on the first key field only.
conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS,"-k1,1");
kfbp.setConf(conf);
Text key=new Text("foo\tbar");
Text val=new Text("val");
int partNum=kfbp.getPartition(key,val,4096);
// A second configuration pass must not change the computed partition.
kfbp.configure(conf);
assertEquals(partNum,kfbp.getPartition(key,val,4096));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Tests DBInputFormat/DBConfiguration job configuration: the
 * DBConfiguration.* properties must reflect exactly what was passed to
 * setInput()/configureDB(), and credentials stay unset when omitted.
 */
@Test(timeout=5000) public void testSetInput(){
JobConf configuration=new JobConf();
String[] fieldNames={"field1","field2"};
// Table-based variant: table name, conditions, orderBy, field names.
DBInputFormat.setInput(configuration,NullDBWritable.class,"table","conditions","orderBy",fieldNames);
assertEquals("org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",configuration.getClass(DBConfiguration.INPUT_CLASS_PROPERTY,null).getName());
assertEquals("table",configuration.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY,null));
String[] fields=configuration.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
assertEquals("field1",fields[0]);
assertEquals("field2",fields[1]);
assertEquals("conditions",configuration.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY,null));
assertEquals("orderBy",configuration.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY,null));
// Query-based variant: explicit select and count queries.
configuration=new JobConf();
DBInputFormat.setInput(configuration,NullDBWritable.class,"query","countQuery");
assertEquals("query",configuration.get(DBConfiguration.INPUT_QUERY,null));
assertEquals("countQuery",configuration.get(DBConfiguration.INPUT_COUNT_QUERY,null));
// configureDB with full credentials.
JobConf jConfiguration=new JobConf();
DBConfiguration.configureDB(jConfiguration,"driverClass","dbUrl","user","password");
assertEquals("driverClass",jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
assertEquals("dbUrl",jConfiguration.get(DBConfiguration.URL_PROPERTY));
assertEquals("user",jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
assertEquals("password",jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
// configureDB without credentials must leave user/password unset.
jConfiguration=new JobConf();
DBConfiguration.configureDB(jConfiguration,"driverClass","dbUrl");
assertEquals("driverClass",jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
assertEquals("dbUrl",jConfiguration.get(DBConfiguration.URL_PROPERTY));
assertNull(jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
assertNull(jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests DBRecordReader: it must create a zero-valued LongWritable key,
 * a NullDBWritable value, report position 0 before any read, and yield
 * no records from the test driver's connection.
 */
@SuppressWarnings("unchecked") @Test(timeout=5000) public void testDBRecordReader() throws Exception {
JobConf job=mock(JobConf.class);
DBConfiguration dbConfig=mock(DBConfiguration.class);
String[] fields={"field1","filed2"};
@SuppressWarnings("rawtypes") DBRecordReader reader=new DBInputFormat().new DBRecordReader(new DBInputSplit(),NullDBWritable.class,job,DriverForTest.getConnection(),dbConfig,"condition",fields,"table");
LongWritable key=reader.createKey();
assertEquals(0,key.get());
DBWritable value=reader.createValue();
assertEquals("org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",value.getClass().getName());
// Nothing has been consumed yet.
assertEquals(0,reader.getPos());
// The test driver's result set is empty, so no record is produced.
assertFalse(reader.next(key,value));
}
InternalCallVerifier EqualityVerifier
/**
 * test DBInputFormat class. Class should split result for chunks
 * @throws Exception
 */
@Test(timeout=10000) public void testDBInputFormat() throws Exception {
JobConf configuration=new JobConf();
setupDriver(configuration);
DBInputFormat format=new DBInputFormat();
format.setConf(configuration);
// setConf is invoked twice — presumably to check that reconfiguration
// is harmless; TODO confirm intent before removing the duplicate call.
format.setConf(configuration);
DBInputFormat.DBInputSplit splitter=new DBInputFormat.DBInputSplit(1,10);
Reporter reporter=mock(Reporter.class);
RecordReader reader=format.getRecordReader(splitter,configuration,reporter);
configuration.setInt(MRJobConfig.NUM_MAPS,3);
InputSplit[] lSplits=format.getSplits(configuration,3);
// The result is expected to split into 3 chunks, the first spanning 5 rows.
assertEquals(5,lSplits[0].getLength());
assertEquals(3,lSplits.length);
// A fresh reader has a LongWritable key, position 0 and zero progress.
assertEquals(LongWritable.class,reader.createKey().getClass());
assertEquals(0,reader.getPos());
assertEquals(0,reader.getProgress(),0.001);
reader.close();
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Tests org.apache.hadoop.mapred.pipes.Submitter: running with no
 * arguments must print the full usage text and exit via ExitException,
 * while a fully-specified argument list must submit with status 0.
 * @throws Exception
 */
@Test public void testSubmitter() throws Exception {
JobConf conf=new JobConf();
File[] psw=cleanTokenPasswordFile();
System.setProperty("test.build.data","target/tmp/build/TEST_SUBMITTER_MAPPER/data");
conf.set("hadoop.log.dir","target/tmp");
// Force the pipes (non-Java) code path for mapper/reducer/reader/writer.
Submitter.setIsJavaMapper(conf,false);
Submitter.setIsJavaReducer(conf,false);
Submitter.setKeepCommandFile(conf,false);
Submitter.setIsJavaRecordReader(conf,false);
Submitter.setIsJavaRecordWriter(conf,false);
PipesPartitioner partitioner=new PipesPartitioner();
partitioner.configure(conf);
Submitter.setJavaPartitioner(conf,partitioner.getClass());
assertEquals(PipesPartitioner.class,(Submitter.getJavaPartitioner(conf)));
SecurityManager securityManager=System.getSecurityManager();
PrintStream oldps=System.out;
ByteArrayOutputStream out=new ByteArrayOutputStream();
// Turn System.exit() into a catchable ExitException.
ExitUtil.disableSystemExit();
try {
System.setOut(new PrintStream(out));
Submitter.main(new String[0]);
fail();
}
catch ( ExitUtil.ExitException e) {
// With no arguments the submitter must print the complete usage text.
// (A vacuous contains("") assertion was removed here: it always passed.)
assertTrue(out.toString().contains("bin/hadoop pipes"));
assertTrue(out.toString().contains("[-input ] // Input directory"));
assertTrue(out.toString().contains("[-output ] // Output directory"));
assertTrue(out.toString().contains("[-jar // jar filename"));
assertTrue(out.toString().contains("[-inputformat ] // InputFormat class"));
assertTrue(out.toString().contains("[-map ] // Java Map class"));
assertTrue(out.toString().contains("[-partitioner ] // Java Partitioner"));
assertTrue(out.toString().contains("[-reduce ] // Java Reduce class"));
assertTrue(out.toString().contains("[-writer ] // Java RecordWriter"));
assertTrue(out.toString().contains("[-program ] // executable URI"));
assertTrue(out.toString().contains("[-reduces ] // number of reduces"));
assertTrue(out.toString().contains("[-lazyOutput ] // createOutputLazily"));
assertTrue(out.toString().contains("-conf specify an application configuration file"));
assertTrue(out.toString().contains("-D use value for given property"));
assertTrue(out.toString().contains("-fs specify a namenode"));
assertTrue(out.toString().contains("-jt specify a job tracker"));
assertTrue(out.toString().contains("-files specify comma separated files to be copied to the map reduce cluster"));
assertTrue(out.toString().contains("-libjars specify comma separated jar files to include in the classpath."));
assertTrue(out.toString().contains("-archives specify comma separated archives to be unarchived on the compute machines."));
}
finally {
System.setOut(oldps);
System.setSecurityManager(securityManager);
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
// Second phase: a complete argument list must submit and exit with 0.
try {
File fCommand=getFileCommand(null);
String[] args=new String[22];
File input=new File(workSpace + File.separator + "input");
if (!input.exists()) {
Assert.assertTrue(input.createNewFile());
}
File outPut=new File(workSpace + File.separator + "output");
FileUtil.fullyDelete(outPut);
args[0]="-input";
args[1]=input.getAbsolutePath();
args[2]="-output";
args[3]=outPut.getAbsolutePath();
args[4]="-inputformat";
args[5]="org.apache.hadoop.mapred.TextInputFormat";
args[6]="-map";
args[7]="org.apache.hadoop.mapred.lib.IdentityMapper";
args[8]="-partitioner";
args[9]="org.apache.hadoop.mapred.pipes.PipesPartitioner";
args[10]="-reduce";
args[11]="org.apache.hadoop.mapred.lib.IdentityReducer";
args[12]="-writer";
args[13]="org.apache.hadoop.mapred.TextOutputFormat";
args[14]="-program";
args[15]=fCommand.getAbsolutePath();
args[16]="-reduces";
args[17]="2";
args[18]="-lazyOutput";
args[19]="lazyOutput";
args[20]="-jobconf";
args[21]="mapreduce.pipes.isjavarecordwriter=false,mapreduce.pipes.isjavarecordreader=false";
Submitter.main(args);
fail();
}
catch ( ExitUtil.ExitException e) {
// A successful submission still exits, but with status 0.
assertEquals(e.status,0);
}
finally {
System.setOut(oldps);
System.setSecurityManager(securityManager);
}
}
InternalCallVerifier EqualityVerifier
/**
 * Tests PipesPartitioner: partitions are computed normally until
 * setNextPartition() overrides the next result.
 */
@Test public void testPipesPartitioner(){
PipesPartitioner partitioner=new PipesPartitioner();
JobConf configuration=new JobConf();
Submitter.getJavaPartitioner(configuration);
partitioner.configure(new JobConf());
IntWritable iw=new IntWritable(4);
// Without an explicit override the delegate's partition (0) is used.
assertEquals(0,partitioner.getPartition(iw,new Text("test"),2));
// setNextPartition forces the next computed partition.
PipesPartitioner.setNextPartition(3);
assertEquals(3,partitioner.getPartition(iw,new Text("test"),2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * test PipesMapRunner test the transfer data from reader
 * @throws Exception
 */
@Test public void testRunner() throws Exception {
File[] psw=cleanTokenPasswordFile();
try {
RecordReader rReader=new ReaderPipesMapRunner();
JobConf conf=new JobConf();
conf.set(Submitter.IS_JAVA_RR,"true");
// The pipes runtime requires a task attempt id in the configuration.
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
CombineOutputCollector output=new CombineOutputCollector(new Counters.Counter(),new Progress());
FileSystem fs=new RawLocalFileSystem();
fs.setConf(conf);
Writer wr=new Writer(conf,fs.create(new Path(workSpace + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true);
output.setWriter(wr);
// The stub executable echoes the pipes protocol traffic to stdout.
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
conf.setBoolean(MRJobConfig.SKIP_RECORDS,true);
TestTaskReporter reporter=new TestTaskReporter();
PipesMapRunner runner=new PipesMapRunner();
initStdOut(conf);
runner.configure(conf);
runner.run(rReader,output,reporter);
String stdOut=readStdOut(conf);
// The stub must have observed the protocol version, the key/value
// classes and the record values produced by the reader.
assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
assertTrue(stdOut.contains("Key class:org.apache.hadoop.io.FloatWritable"));
assertTrue(stdOut.contains("Value class:org.apache.hadoop.io.NullWritable"));
assertTrue(stdOut.contains("value:0.0"));
assertTrue(stdOut.contains("value:9.0"));
}
finally {
// Clean up token password files on JVM exit regardless of outcome.
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* test org.apache.hadoop.mapred.pipes.Application
* test a internal functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS...
* @throws Throwable
*/
@Test public void testApplication() throws Throwable {
JobConf conf=new JobConf();
RecordReader rReader=new Reader();
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub");
TestTaskReporter reporter=new TestTaskReporter();
File[] psw=cleanTokenPasswordFile();
try {
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
FakeCollector output=new FakeCollector(new Counters.Counter(),new Progress());
FileSystem fs=new RawLocalFileSystem();
fs.setConf(conf);
Writer wr=new Writer(conf,fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true);
output.setWriter(wr);
conf.set(Submitter.PRESERVE_COMMANDFILE,"true");
initStdOut(conf);
Application,Writable,IntWritable,Text> application=new Application,Writable,IntWritable,Text>(conf,rReader,output,reporter,IntWritable.class,Text.class);
application.getDownlink().flush();
application.getDownlink().mapItem(new IntWritable(3),new Text("txt"));
application.getDownlink().flush();
application.waitForFinish();
wr.close();
String stdOut=readStdOut(conf);
assertTrue(stdOut.contains("key:3"));
assertTrue(stdOut.contains("value:txt"));
assertEquals(1.0,reporter.getProgress(),0.01);
assertNotNull(reporter.getCounter("group","name"));
assertEquals(reporter.getStatus(),"PROGRESS");
stdOut=readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile"));
assertEquals(0.55f,rReader.getProgress(),0.001);
application.getDownlink().close();
Entry entry=output.getCollect().entrySet().iterator().next();
assertEquals(123,entry.getKey().get());
assertEquals("value",entry.getValue().toString());
try {
application.abort(new Throwable());
fail();
}
catch ( IOException e) {
assertEquals("pipe child exception",e.getMessage());
}
}
finally {
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests org.apache.hadoop.mapred.pipes.PipesReducer: the key and every
 * value passed to reduce() must reach the stub executable.
 * @throws Exception
 */
@Test public void testPipesReduser() throws Exception {
File[] psw=cleanTokenPasswordFile();
JobConf conf=new JobConf();
try {
Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service"));
TokenCache.setJobToken(token,conf.getCredentials());
// The stub executable echoes reduce keys/values to stdout.
File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeReducerStub");
conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath());
PipesReducer reducer=new PipesReducer();
reducer.configure(conf);
BooleanWritable bw=new BooleanWritable(true);
conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName);
initStdOut(conf);
conf.setBoolean(MRJobConfig.SKIP_RECORDS,true);
CombineOutputCollector output=new CombineOutputCollector(new Counters.Counter(),new Progress());
Reporter reporter=new TestTaskReporter();
// Typed list instead of the original raw List/ArrayList.
List<Text> texts=new ArrayList<Text>();
texts.add(new Text("first"));
texts.add(new Text("second"));
texts.add(new Text("third"));
reducer.reduce(bw,texts.iterator(),output,reporter);
reducer.close();
String stdOut=readStdOut(conf);
// The stub must have seen the key and all three values, in order.
assertTrue(stdOut.contains("reducer key :true"));
assertTrue(stdOut.contains("reduce value :first"));
assertTrue(stdOut.contains("reduce value :second"));
assertTrue(stdOut.contains("reduce value :third"));
}
finally {
if (psw != null) {
for ( File file : psw) {
file.deleteOnExit();
}
}
}
}
BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * test PipesNonJavaInputFormat
 */
@Test public void testFormat() throws IOException {
PipesNonJavaInputFormat inputFormat=new PipesNonJavaInputFormat();
JobConf conf=new JobConf();
Reporter reporter=mock(Reporter.class);
RecordReader reader=inputFormat.getRecordReader(new FakeSplit(),conf,reporter);
// A fresh reader reports zero progress.
assertEquals(0.0f,reader.getProgress(),0.001);
// Create two empty input files so the format produces two splits.
File input1=new File(workSpace + File.separator + "input1");
if (!input1.getParentFile().exists()) {
Assert.assertTrue(input1.getParentFile().mkdirs());
}
if (!input1.exists()) {
Assert.assertTrue(input1.createNewFile());
}
File input2=new File(workSpace + File.separator + "input2");
if (!input2.exists()) {
Assert.assertTrue(input2.createNewFile());
}
conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,StringUtils.escapeString(input1.getAbsolutePath()) + "," + StringUtils.escapeString(input2.getAbsolutePath()));
InputSplit[] splits=inputFormat.getSplits(conf,2);
assertEquals(2,splits.length);
// The dummy reader has null key/value, position 0 and zero progress,
// and its progress tracks the float key passed to next().
PipesNonJavaInputFormat.PipesDummyRecordReader dummyRecordReader=new PipesNonJavaInputFormat.PipesDummyRecordReader(conf,splits[0]);
assertNull(dummyRecordReader.createKey());
assertNull(dummyRecordReader.createValue());
assertEquals(0,dummyRecordReader.getPos());
assertEquals(0.0,dummyRecordReader.getProgress(),0.001);
assertTrue(dummyRecordReader.next(new FloatWritable(2.0f),NullWritable.get()));
assertEquals(2.0,dummyRecordReader.getProgress(),0.001);
dummyRecordReader.close();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Cluster must reject an unknown framework name and, for "local",
 * supply a LocalJobRunner-backed client.
 */
@Test public void testClusterWithLocalClientProvider() throws Exception {
Configuration conf=new Configuration();
try {
conf.set(MRConfig.FRAMEWORK_NAME,"incorrect");
new Cluster(conf);
fail("Cluster should not be initialized with incorrect framework name");
}
catch ( IOException e) {
// expected: no client provider handles the "incorrect" framework name
}
conf.set(MRConfig.FRAMEWORK_NAME,"local");
Cluster cluster=new Cluster(conf);
assertTrue(cluster.getClient() instanceof LocalJobRunner);
cluster.close();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verify counter value works: initialization via setValue, repeated
 * random increments, and a final setValue must each leave the counter
 * at the expected value.
 */
@Test public void testCounterValue(){
final int NUMBER_TESTS=100;
final int NUMBER_INC=10;
final Random rand=new Random();
for (int trial=0; trial < NUMBER_TESTS; trial++) {
long expectedValue=rand.nextInt();
Counter counter=new Counters().findCounter("test","foo");
counter.setValue(expectedValue);
assertEquals("Counter value is not initialized correctly",expectedValue,counter.getValue());
for (int step=0; step < NUMBER_INC; step++) {
int delta=rand.nextInt();
counter.increment(delta);
expectedValue+=delta;
assertEquals("Counter value is not incremented correctly",expectedValue,counter.getValue());
}
expectedValue=rand.nextInt();
counter.setValue(expectedValue);
assertEquals("Counter value is not set correctly",expectedValue,counter.getValue());
}
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * incrAllCounters must copy both framework and generic counter values
 * into the destination Counters instance.
 */
@Test public void testCountersIncrement(){
Counters fCounters=new Counters();
Counter fCounter=fCounters.findCounter(FRAMEWORK_COUNTER);
fCounter.setValue(100);
Counter gCounter=fCounters.findCounter("test","foo");
gCounter.setValue(200);
Counters counters=new Counters();
counters.incrAllCounters(fCounters);
Counter counter;
// Every source group must be present in the target with its values.
for ( CounterGroup cg : fCounters) {
CounterGroup group=counters.getGroup(cg.getName());
if (group.getName().equals("test")) {
counter=counters.findCounter("test","foo");
assertEquals(200,counter.getValue());
}
else {
counter=counters.findCounter(FRAMEWORK_COUNTER);
assertEquals(100,counter.getValue());
}
}
}
InternalCallVerifier IdentityVerifier
/**
 * Tokens and secret keys added to the login user's credentials must be
 * visible (as the same instances) through a new Job's credentials.
 * @throws Exception if credential setup fails
 */
@Test public void testUGICredentialsPropogation() throws Exception {
Credentials creds=new Credentials();
// Raw Token repairs the truncated generic ("Token>") in the original,
// matching the raw-Token style used elsewhere in this file.
Token token=mock(Token.class);
Text tokenService=new Text("service");
Text secretName=new Text("secret");
// Java-style array declaration instead of the original C-style one.
byte[] secret=new byte[]{};
creds.addToken(tokenService,token);
creds.addSecretKey(secretName,secret);
UserGroupInformation.getLoginUser().addCredentials(creds);
JobConf jobConf=new JobConf();
Job job=new Job(jobConf);
// Same object identity is required, not just equality.
assertSame(token,job.getCredentials().getToken(tokenService));
assertSame(secret,job.getCredentials().getSecretKey(secretName));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Job.toString() must not fail (return null) even for a FAILED job
 * with empty task reports and completion events.
 */
@Test public void testJobToString() throws IOException, InterruptedException {
Cluster cluster=mock(Cluster.class);
ClientProtocol client=mock(ClientProtocol.class);
when(cluster.getClient()).thenReturn(client);
JobID jobid=new JobID("1014873536921",6);
JobStatus status=new JobStatus(jobid,0.0f,0.0f,0.0f,0.0f,State.FAILED,JobPriority.NORMAL,"root","TestJobToString","job file","tracking url");
when(client.getJobStatus(jobid)).thenReturn(status);
// Empty reports/events exercise toString's boundary conditions.
when(client.getTaskReports(jobid,TaskType.MAP)).thenReturn(new TaskReport[0]);
when(client.getTaskReports(jobid,TaskType.REDUCE)).thenReturn(new TaskReport[0]);
when(client.getTaskCompletionEvents(jobid,0,10)).thenReturn(new TaskCompletionEvent[0]);
Job job=Job.getInstance(cluster,status,new JobConf());
Assert.assertNotNull(job.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Runs monitorAndPrintJob() against mocked job status transitions
 * (RUNNING then SUCCEEDED, uber mode on) and checks that the captured
 * log output reports uber mode, 100% progress and completion.
 * @throws Exception if monitoring fails
 */
@Test public void testJobMonitorAndPrint() throws Exception {
JobStatus jobStatus_1=new JobStatus(new JobID("job_000",1),1f,0.1f,0.1f,0f,State.RUNNING,JobPriority.HIGH,"tmp-user","tmp-jobname","tmp-queue","tmp-jobfile","tmp-url",true);
JobStatus jobStatus_2=new JobStatus(new JobID("job_000",1),1f,1f,1f,1f,State.SUCCEEDED,JobPriority.HIGH,"tmp-user","tmp-jobname","tmp-queue","tmp-jobfile","tmp-url",true);
doAnswer(new Answer(){
@Override public TaskCompletionEvent[] answer( InvocationOnMock invocation) throws Throwable {
return new TaskCompletionEvent[0];
}
}
).when(job).getTaskCompletionEvents(anyInt(),anyInt());
doReturn(new TaskReport[5]).when(job).getTaskReports(isA(TaskType.class));
// First poll sees RUNNING, second sees SUCCEEDED.
when(clientProtocol.getJobStatus(any(JobID.class))).thenReturn(jobStatus_1,jobStatus_2);
// Capture everything Job logs during monitoring.
Layout layout=Logger.getRootLogger().getAppender("stdout").getLayout();
ByteArrayOutputStream os=new ByteArrayOutputStream();
WriterAppender appender=new WriterAppender(layout,os);
appender.setThreshold(Level.ALL);
Logger qlogger=Logger.getLogger(Job.class);
qlogger.addAppender(appender);
job.monitorAndPrintJob();
qlogger.removeAppender(appender);
LineNumberReader r=new LineNumberReader(new StringReader(os.toString()));
String line;
boolean foundHundred=false;
boolean foundComplete=false;
boolean foundUber=false;
String uberModeMatch="uber mode : true";
String progressMatch="map 100% reduce 100%";
String completionMatch="completed successfully";
while ((line=r.readLine()) != null) {
if (line.contains(uberModeMatch)) {
foundUber=true;
}
foundHundred=line.contains(progressMatch);
if (foundHundred) break;
}
line=r.readLine();
// Guard against truncated output: readLine() may return null here,
// which previously produced an NPE instead of a clear test failure.
assertNotNull("Log ended before the completion line was seen",line);
foundComplete=line.contains(completionMatch);
assertTrue(foundUber);
assertTrue(foundHundred);
assertTrue(foundComplete);
System.out.println("The output of job.toString() is : \n" + job.toString());
assertTrue(job.toString().contains("Number of maps: 5\n"));
assertTrue(job.toString().contains("Number of reduces: 5\n"));
}
InternalCallVerifier EqualityVerifier
/**
 * Runs LargeSorter with several io.sort.mb buffer sizes; each run must
 * exit with status 0.
 */
@Test public void testLargeSort() throws Exception {
String[] args=new String[0];
// Different buffer sizes exercise different spill behavior.
int[] ioSortMbs={128,256,1536};
for ( int ioSortMb : ioSortMbs) {
Configuration conf=new Configuration(cluster.getConfig());
conf.setInt(MRJobConfig.IO_SORT_MB,ioSortMb);
conf.setInt(LargeSorter.NUM_MAP_TASKS,1);
// One map that produces as many MB as the sort buffer holds.
conf.setInt(LargeSorter.MBS_PER_MAP,ioSortMb);
assertEquals("Large sort failed for " + ioSortMb,0,ToolRunner.run(conf,new LargeSorter(),args));
}
}
InternalCallVerifier BooleanVerifier
/**
 * Run a test with a misconfigured number of mappers.
 * Expect failure.
 */
@Test public void testInvalidMultiMapParallelism() throws Exception {
Job job=Job.getInstance();
Path inputPath=createMultiMapsInput();
Path outputPath=getOutputPath();
Configuration conf=new Configuration();
FileSystem fs=FileSystem.getLocal(conf);
// Start from a clean output directory.
if (fs.exists(outputPath)) {
fs.delete(outputPath,true);
}
job.setMapperClass(StressMapper.class);
job.setReducerClass(CountingReducer.class);
job.setNumReduceTasks(1);
// A negative max-running-maps value is invalid and must fail the job.
LocalJobRunner.setLocalMaxRunningMaps(job,-6);
FileInputFormat.addInputPath(job,inputPath);
FileOutputFormat.setOutputPath(job,outputPath);
boolean success=job.waitForCompletion(true);
assertFalse("Job succeeded somehow",success);
}
InternalCallVerifier BooleanVerifier
/**
 * Test case for zero mappers: a job whose input format produces no
 * splits must still complete successfully.
 */
@Test public void testEmptyMaps() throws Exception {
Job job=Job.getInstance();
Path outputPath=getOutputPath();
Configuration conf=new Configuration();
FileSystem fs=FileSystem.getLocal(conf);
// Start from a clean output directory.
if (fs.exists(outputPath)) {
fs.delete(outputPath,true);
}
// EmptyInputFormat yields no splits, hence zero map tasks.
job.setInputFormatClass(EmptyInputFormat.class);
job.setNumReduceTasks(1);
FileOutputFormat.setOutputPath(job,outputPath);
boolean success=job.waitForCompletion(true);
assertTrue("Empty job should work",success);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test that the GC counter actually increments when we know that we've
 * spent some time in the GC during the mapper.
 */
@Test public void testGcCounter() throws Exception {
Path inputPath=getInputPath();
Path outputPath=getOutputPath();
Configuration conf=new Configuration();
FileSystem fs=FileSystem.getLocal(conf);
// Start from clean input and output directories.
if (fs.exists(outputPath)) {
fs.delete(outputPath,true);
}
if (fs.exists(inputPath)) {
fs.delete(inputPath,true);
}
createInputFile(inputPath,0,20);
Job job=Job.getInstance();
// GCMapper deliberately generates garbage to trigger collections.
job.setMapperClass(GCMapper.class);
job.setNumReduceTasks(0);
job.getConfiguration().set(MRJobConfig.IO_SORT_MB,"25");
FileInputFormat.addInputPath(job,inputPath);
FileOutputFormat.setOutputPath(job,outputPath);
boolean ret=job.waitForCompletion(true);
assertTrue("job failed",ret);
// GC_TIME_MILLIS must exist and be positive after the GC-heavy mapper.
Counter gcCounter=job.getCounters().findCounter(TaskCounter.GC_TIME_MILLIS);
assertNotNull(gcCounter);
assertTrue("No time spent in gc",gcCounter.getValue() > 0);
}
InternalCallVerifier BooleanVerifier
/**
 * Submits a job wired up with the local test input/output formats and
 * mapper, and asserts that it completes successfully.
 */
@Test public void testJobSubmission() throws Exception {
JobConf jobConf=new JobConf();
Job submittedJob=new Job(jobConf);
submittedJob.setOutputKeyClass(IntWritable.class);
submittedJob.setOutputValueClass(IntWritable.class);
submittedJob.setInputFormatClass(TestInputFormat.class);
submittedJob.setOutputFormatClass(TestOutputFormat.class);
submittedJob.setMapperClass(TestMapper.class);
submittedJob.waitForCompletion(true);
assertTrue(submittedJob.isSuccessful());
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Runs a local job with a combiner and a combiner key-grouping
 * comparator: combiner input records must exceed output records, and
 * the grouped output must contain exactly the expected aggregates.
 * @throws Exception if the job or file setup fails
 */
@Test public void testCombiner() throws Exception {
if (!new File(TEST_ROOT_DIR).mkdirs()) {
throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
}
File in=new File(TEST_ROOT_DIR,"input");
if (!in.mkdirs()) {
throw new RuntimeException("Could not create test dir: " + in);
}
File out=new File(TEST_ROOT_DIR,"output");
// Keys group as "A" and "B" by their prefix before '|'.
PrintWriter pw=new PrintWriter(new FileWriter(new File(in,"data.txt")));
pw.println("A|a,1");
pw.println("A|b,2");
pw.println("B|a,3");
pw.println("B|b,4");
pw.println("B|c,5");
pw.close();
JobConf conf=new JobConf();
conf.set("mapreduce.framework.name","local");
Job job=new Job(conf);
TextInputFormat.setInputPaths(job,new Path(in.getPath()));
TextOutputFormat.setOutputPath(job,new Path(out.getPath()));
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setInputFormatClass(TextInputFormat.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(LongWritable.class);
job.setOutputFormatClass(TextOutputFormat.class);
job.setGroupingComparatorClass(GroupComparator.class);
job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
job.setCombinerClass(Combiner.class);
// Force the combiner to run on every spill.
job.getConfiguration().setInt("min.num.spills.for.combine",0);
job.submit();
job.waitForCompletion(false);
if (job.isSuccessful()) {
Counters counters=job.getCounters();
long combinerInputRecords=counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter","COMBINE_INPUT_RECORDS").getValue();
long combinerOutputRecords=counters.findCounter("org.apache.hadoop.mapreduce.TaskCounter","COMBINE_OUTPUT_RECORDS").getValue();
// The combiner must have run and must have reduced the record count.
Assert.assertTrue(combinerInputRecords > 0);
Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
BufferedReader br=new BufferedReader(new FileReader(new File(out,"part-r-00000")));
// Typed sets instead of the original raw Set/HashSet.
Set<String> output=new HashSet<String>();
String line=br.readLine();
Assert.assertNotNull(line);
// Each output line contributes "<group><max-value>", e.g. "A2".
output.add(line.substring(0,1) + line.substring(4,5));
line=br.readLine();
Assert.assertNotNull(line);
output.add(line.substring(0,1) + line.substring(4,5));
line=br.readLine();
// Exactly two grouped records are expected.
Assert.assertNull(line);
br.close();
Set<String> expected=new HashSet<String>();
expected.add("A2");
expected.add("B5");
Assert.assertEquals(expected,output);
}
else {
Assert.fail("Job failed");
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test public void testPluginAbility(){
try {
JobConf jobConf=new JobConf();
jobConf.setClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,TestShufflePlugin.TestShuffleConsumerPlugin.class,ShuffleConsumerPlugin.class);
ShuffleConsumerPlugin shuffleConsumerPlugin=null;
Class extends ShuffleConsumerPlugin> clazz=jobConf.getClass(MRConfig.SHUFFLE_CONSUMER_PLUGIN,Shuffle.class,ShuffleConsumerPlugin.class);
assertNotNull("Unable to get " + MRConfig.SHUFFLE_CONSUMER_PLUGIN,clazz);
shuffleConsumerPlugin=ReflectionUtils.newInstance(clazz,jobConf);
assertNotNull("Unable to load " + MRConfig.SHUFFLE_CONSUMER_PLUGIN,shuffleConsumerPlugin);
}
catch ( Exception e) {
assertTrue("Threw exception:" + e,false);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier IgnoredMethod HybridVerifier
/**
 * Tests context.setStatus method.
 * TODO fix testcase
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test @Ignore public void testContextStatus() throws IOException, InterruptedException, ClassNotFoundException {
Path test=new Path(testRootTempDir,"testContextStatus");
int numMaps=1;
// Map-only job: MyMapper calls context.setStatus, which must show up
// in the task report.
Job job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,0);
job.setMapperClass(MyMapper.class);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
TaskReport[] reports=job.getTaskReports(TaskType.MAP);
assertEquals(numMaps,reports.length);
assertEquals(myStatus,reports[0].getState());
// Second run adds a reduce phase with copy mapper/reducer.
int numReduces=1;
job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numMaps,numReduces);
job.setMapperClass(DataCopyMapper.class);
job.setReducerClass(DataCopyReducer.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(Text.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(0);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests new MapReduce reduce task's context.getProgress() method.
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test public void testReduceContextProgress() throws IOException, InterruptedException, ClassNotFoundException {
int numTasks=1;
Path test=new Path(testRootTempDir,"testReduceContextProgress");
Job job=MapReduceTestUtil.createJob(createJobConf(),new Path(test,"in"),new Path(test,"out"),numTasks,numTasks,INPUT);
// The checker mapper/reducer assert progress values inside the tasks;
// a wrong progress fails the job, which the final assert catches.
job.setMapperClass(ProgressCheckerMapper.class);
job.setReducerClass(ProgressCheckerReducer.class);
job.setMapOutputKeyClass(Text.class);
// Single attempt per task so a failed progress check fails the job.
job.setMaxMapAttempts(1);
job.setMaxReduceAttempts(1);
job.waitForCompletion(true);
assertTrue("Job failed",job.isSuccessful());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that child queues are converted too during conversion of the parent
 * queue.
 */
@Test public void testFromYarnQueue(){
  // One mocked RUNNING child queue attached to a mocked RUNNING parent.
  org.apache.hadoop.yarn.api.records.QueueInfo child=Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo=Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  // Restored the type argument that was missing (raw List/ArrayList).
  List<org.apache.hadoop.yarn.api.records.QueueInfo> children=new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
  children.add(child);
  Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
  Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);
  org.apache.hadoop.mapreduce.QueueInfo returned=TypeConverter.fromYarn(queueInfo,new Configuration());
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals("QueueInfo children weren't properly converted",1,returned.getQueueChildren().size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testFromYarnApplicationReport(){
  // Mock a KILLED application report with known identifying fields.
  ApplicationId mockAppId=mock(ApplicationId.class);
  when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
  when(mockAppId.getId()).thenReturn(6789);
  ApplicationReport mockReport=mock(ApplicationReport.class);
  when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
  when(mockReport.getApplicationId()).thenReturn(mockAppId);
  when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
  when(mockReport.getUser()).thenReturn("dummy-user");
  when(mockReport.getQueue()).thenReturn("dummy-queue");
  String jobFile="dummy-path/job.xml";
  try {
    // Conversion without an ApplicationResourceUsageReport must not NPE;
    // the returned status is intentionally unused here.
    JobStatus status=TypeConverter.fromYarn(mockReport,jobFile);
  }
  catch ( NullPointerException npe) {
    // Fixed typo in the failure message: "converstion" -> "conversion".
    Assert.fail("Type conversion from YARN fails for jobs without " + "ApplicationUsageReport");
  }
  // Now attach a usage report (2048MB, 1 reserved / 3 used containers) and
  // verify every converted JobStatus field.
  ApplicationResourceUsageReport appUsageRpt=Records.newRecord(ApplicationResourceUsageReport.class);
  Resource r=Records.newRecord(Resource.class);
  r.setMemory(2048);
  appUsageRpt.setNeededResources(r);
  appUsageRpt.setNumReservedContainers(1);
  appUsageRpt.setNumUsedContainers(3);
  appUsageRpt.setReservedResources(r);
  appUsageRpt.setUsedResources(r);
  when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
  JobStatus status=TypeConverter.fromYarn(mockReport,jobFile);
  Assert.assertNotNull("fromYarn returned null status",status);
  Assert.assertEquals("jobFile set incorrectly","dummy-path/job.xml",status.getJobFile());
  Assert.assertEquals("queue set incorrectly","dummy-queue",status.getQueue());
  Assert.assertEquals("trackingUrl set incorrectly","dummy-tracking-url",status.getTrackingUrl());
  Assert.assertEquals("user set incorrectly","dummy-user",status.getUsername());
  Assert.assertEquals("schedulingInfo set incorrectly","dummy-tracking-url",status.getSchedulingInfo());
  Assert.assertEquals("jobId set incorrectly",6789,status.getJobID().getId());
  Assert.assertEquals("state set incorrectly",JobStatus.State.KILLED,status.getState());
  Assert.assertEquals("needed mem info set incorrectly",2048,status.getNeededMem());
  Assert.assertEquals("num rsvd slots info set incorrectly",1,status.getNumReservedSlots());
  Assert.assertEquals("num used slots info set incorrectly",3,status.getNumUsedSlots());
  Assert.assertEquals("rsvd mem info set incorrectly",2048,status.getReservedMem());
  Assert.assertEquals("used mem info set incorrectly",2048,status.getUsedMem());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testFromYarnQueueInfo(){
  // A STOPPED YARN queue must convert to the lower-cased mapreduce state name.
  org.apache.hadoop.yarn.api.records.QueueInfo queueInfo=Records.newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class);
  queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
  org.apache.hadoop.mapreduce.QueueInfo returned=TypeConverter.fromYarn(queueInfo,new Configuration());
  // JUnit convention: expected value first, actual second (was reversed).
  Assert.assertEquals("queueInfo translation didn't work.",queueInfo.getQueueState().toString().toLowerCase(),returned.getState().toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testFromYarn() throws Exception {
// Build an ApplicationReport with known start/finish times and state, then
// verify they survive conversion to the mapreduce JobStatus.
int appStartTime=612354;
int appFinishTime=612355;
YarnApplicationState state=YarnApplicationState.RUNNING;
ApplicationId applicationId=ApplicationId.newInstance(0,0);
ApplicationReport applicationReport=Records.newRecord(ApplicationReport.class);
applicationReport.setApplicationId(applicationId);
applicationReport.setYarnApplicationState(state);
applicationReport.setStartTime(appStartTime);
applicationReport.setFinishTime(appFinishTime);
applicationReport.setUser("TestTypeConverter-user");
// Usage report attached so fromYarn has resource data (presumably required
// by the converter -- see the NPE check in testFromYarnApplicationReport).
ApplicationResourceUsageReport appUsageRpt=Records.newRecord(ApplicationResourceUsageReport.class);
Resource r=Records.newRecord(Resource.class);
r.setMemory(2048);
appUsageRpt.setNeededResources(r);
appUsageRpt.setNumReservedContainers(1);
appUsageRpt.setNumUsedContainers(3);
appUsageRpt.setReservedResources(r);
appUsageRpt.setUsedResources(r);
applicationReport.setApplicationResourceUsageReport(appUsageRpt);
JobStatus jobStatus=TypeConverter.fromYarn(applicationReport,"dummy-jobfile");
Assert.assertEquals(appStartTime,jobStatus.getStartTime());
Assert.assertEquals(appFinishTime,jobStatus.getFinishTime());
Assert.assertEquals(state.toString(),jobStatus.getState().toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testFromYarnJobReport() throws Exception {
// Build a JobReport with known times/state and verify they survive
// conversion to the mapreduce JobStatus.
int jobStartTime=612354;
int jobFinishTime=612355;
JobState state=JobState.RUNNING;
JobId jobId=Records.newRecord(JobId.class);
JobReport jobReport=Records.newRecord(JobReport.class);
ApplicationId applicationId=ApplicationId.newInstance(0,0);
jobId.setAppId(applicationId);
jobId.setId(0);
jobReport.setJobId(jobId);
jobReport.setJobState(state);
jobReport.setStartTime(jobStartTime);
jobReport.setFinishTime(jobFinishTime);
jobReport.setUser("TestTypeConverter-user");
JobStatus jobStatus=TypeConverter.fromYarn(jobReport,"dummy-jobfile");
Assert.assertEquals(jobStartTime,jobStatus.getStartTime());
Assert.assertEquals(jobFinishTime,jobStatus.getFinishTime());
Assert.assertEquals(state.toString(),jobStatus.getState().toString());
}
InternalCallVerifier BooleanVerifier
@Test public void testClusterWithYarnClientProtocolProvider() throws Exception {
  Configuration conf=new Configuration(false);
  Cluster cluster=null;
  try {
    cluster=new Cluster(conf);
  }
  catch ( Exception e) {
    throw new Exception("Failed to initialize a local runner w/o a cluster framework key",e);
  }
  try {
    // Without a framework key the provider must fall back to the local runner.
    assertTrue("client is not a LocalJobRunner",cluster.getClient() instanceof LocalJobRunner);
  }
  finally {
    if (cluster != null) {
      cluster.close();
    }
  }
  try {
    conf=new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
    cluster=new Cluster(conf);
    ClientProtocol client=cluster.getClient();
    // Fixed message: it prints when the assertion FAILS, so it must describe
    // the failure condition (was "client is a YARNRunner").
    assertTrue("client is not a YARNRunner",client instanceof YARNRunner);
  }
  catch ( IOException ignored) {
    // Intentionally ignored: creating a YARN-backed client can fail without a
    // reachable ResourceManager, making this leg best-effort -- TODO confirm.
  }
  finally {
    if (cluster != null) {
      cluster.close();
    }
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testClusterGetDelegationToken() throws Exception {
Configuration conf=new Configuration(false);
Cluster cluster=null;
try {
conf=new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME);
cluster=new Cluster(conf);
YARNRunner yrunner=(YARNRunner)cluster.getClient();
// Canned RM response carrying a token of kind "Testclusterkind".
GetDelegationTokenResponse getDTResponse=recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
org.apache.hadoop.yarn.api.records.Token rmDTToken=recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Token.class);
rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
rmDTToken.setKind("Testclusterkind");
rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
rmDTToken.setService("0.0.0.0:8032");
getDTResponse.setRMDelegationToken(rmDTToken);
// Mock RM protocol returning that response for any token request.
final ApplicationClientProtocol cRMProtocol=mock(ApplicationClientProtocol.class);
when(cRMProtocol.getDelegationToken(any(GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
// Delegate that swaps the mocked protocol into the YarnClient at start time.
ResourceMgrDelegate rmgrDelegate=new ResourceMgrDelegate(new YarnConfiguration(conf)){
@Override protected void serviceStart() throws Exception {
assertTrue(this.client instanceof YarnClientImpl);
((YarnClientImpl)this.client).setRMClient(cRMProtocol);
}
}
;
yrunner.setResourceMgrDelegate(rmgrDelegate);
// The token fetched through the cluster must be the mocked one.
Token t=cluster.getDelegationToken(new Text(" "));
assertTrue("Token kind is instead " + t.getKind().toString(),"Testclusterkind".equals(t.getKind().toString()));
}
finally {
if (cluster != null) {
cluster.close();
}
}
}
InternalCallVerifier BooleanVerifier
@Test public void testDelete() throws Exception {
  // Deleting a known checkpoint id must report success and issue exactly one
  // non-recursive delete of the checkpoint path.
  final Path checkpointPath = new Path("/chk/chk0");
  final Path baseDir = new Path("/otherchk");
  FileSystem fs = mock(FileSystem.class);
  when(fs.delete(eq(checkpointPath), eq(false))).thenReturn(true);
  FSCheckpointService service = new FSCheckpointService(fs, baseDir, new SimpleNamingService("chk0"), (short) 1);
  assertTrue(service.delete(new FSCheckpointID(checkpointPath)));
  verify(fs).delete(eq(checkpointPath), eq(false));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testDetermineTimestamps() throws IOException {
// Two cache files registered on the job; determineTimestamps must stat both,
// populate the stat cache, and record their modification times in the conf.
Job job=Job.getInstance(conf);
job.addCacheFile(firstCacheFile.toUri());
job.addCacheFile(secondCacheFile.toUri());
Configuration jobConf=job.getConfiguration();
Map statCache=new HashMap();
ClientDistributedCacheManager.determineTimestamps(jobConf,statCache);
FileStatus firstStatus=statCache.get(firstCacheFile.toUri());
FileStatus secondStatus=statCache.get(secondCacheFile.toUri());
Assert.assertNotNull(firstStatus);
Assert.assertNotNull(secondStatus);
Assert.assertEquals(2,statCache.size());
// Timestamps are stored comma-separated, in registration order.
String expected=firstStatus.getModificationTime() + "," + secondStatus.getModificationTime();
Assert.assertEquals(expected,jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS));
}
InternalCallVerifier EqualityVerifier
/**
 * Simple test for JobPriorityChangeEvent and JobPriorityChange.
 * @throws Exception
 */
@Test(timeout=10000) public void testJobPriorityChange() throws Exception {
  org.apache.hadoop.mapreduce.JobID jid=new JobID("001",1);
  JobPriorityChangeEvent test=new JobPriorityChangeEvent(jid,JobPriority.LOW);
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(jid.toString(),test.getJobId().toString());
  assertEquals(JobPriority.LOW,test.getPriority());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=10000) public void testJobQueueChange() throws Exception {
  // A queue-change event must echo back the job id and the new queue name.
  org.apache.hadoop.mapreduce.JobID jid=new JobID("001",1);
  JobQueueChangeEvent test=new JobQueueChangeEvent(jid,"newqueue");
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(jid.toString(),test.getJobId().toString());
  assertEquals("newqueue",test.getJobQueueName());
}
InternalCallVerifier EqualityVerifier
/**
 * Simple test for TaskUpdatedEvent and TaskUpdated.
 * @throws Exception
 */
@Test(timeout=10000) public void testTaskUpdated() throws Exception {
  JobID jid=new JobID("001",1);
  TaskID tid=new TaskID(jid,TaskType.REDUCE,2);
  TaskUpdatedEvent test=new TaskUpdatedEvent(tid,1234L);
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(tid.toString(),test.getTaskId().toString());
  assertEquals(1234L,test.getFinishTime());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testEvents() throws Exception {
// Replays the serialized event stream produced by getEvents() and checks
// each event's type and the id recorded in its Avro datum, in exact order.
// The sequence below must match getEvents() one-for-one.
EventReader reader=new EventReader(new DataInputStream(new ByteArrayInputStream(getEvents())));
HistoryEvent e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.JOB_PRIORITY_CHANGED));
assertEquals("ID",((JobPriorityChange)e.getDatum()).jobid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.JOB_STATUS_CHANGED));
assertEquals("ID",((JobStatusChanged)e.getDatum()).jobid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.TASK_UPDATED));
assertEquals("ID",((TaskUpdated)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.JOB_KILLED));
assertEquals("ID",((JobUnsuccessfulCompletion)e.getDatum()).jobid.toString());
// Remaining events: reduce attempt started/finished/killed cycles, all for
// the same task attempt id.
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
assertEquals("task_1_2_r03_4",((TaskAttemptStarted)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
assertEquals("task_1_2_r03_4",((TaskAttemptFinished)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
assertEquals("task_1_2_r03_4",((TaskAttemptStarted)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
assertEquals("task_1_2_r03_4",((TaskAttemptFinished)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString());
e=reader.getNextEvent();
assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
assertEquals("task_1_2_r03_4",((TaskAttemptUnsuccessfulCompletion)e.getDatum()).taskid.toString());
reader.close();
}
InternalCallVerifier EqualityVerifier
/**
 * Tests the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 * @throws Exception
 */
@Test(timeout=10000) public void testTaskAttemptFinishedEvent() throws Exception {
  JobID jid=new JobID("001",1);
  TaskID tid=new TaskID(jid,TaskType.REDUCE,2);
  TaskAttemptID taskAttemptId=new TaskAttemptID(tid,3);
  Counters counters=new Counters();
  TaskAttemptFinishedEvent test=new TaskAttemptFinishedEvent(taskAttemptId,TaskType.REDUCE,"TEST",123L,"RAKNAME","HOSTNAME","STATUS",counters);
  // JUnit convention: expected value first, actual second (all were reversed).
  assertEquals(taskAttemptId.toString(),test.getAttemptId().toString());
  assertEquals(counters,test.getCounters());
  assertEquals(123L,test.getFinishTime());
  assertEquals("HOSTNAME",test.getHostname());
  assertEquals("RAKNAME",test.getRackName());
  assertEquals("STATUS",test.getState());
  assertEquals(tid,test.getTaskId());
  assertEquals("TEST",test.getTaskStatus());
  assertEquals(TaskType.REDUCE,test.getTaskType());
}
InternalCallVerifier BooleanVerifier
@Test public void testSigTermedFunctionality() throws IOException {
  AppContext mockedContext=Mockito.mock(AppContext.class);
  JHEventHandlerForSigtermTest jheh=new JHEventHandlerForSigtermTest(mockedContext,0);
  JobId jobId=Mockito.mock(JobId.class);
  jheh.addToFileMap(jobId);
  // Phase 1: four ordinary events, no forced completion -- exactly four handled.
  final int numEvents=4;
  JobHistoryEvent events[]=new JobHistoryEvent[numEvents];
  for (int i=0; i < numEvents; ++i) {
    events[i]=getEventToEnqueue(jobId);
    jheh.handle(events[i]);
  }
  jheh.stop();
  assertTrue("handleEvent should've been called only 4 times but was " + jheh.eventsHandled,jheh.eventsHandled == 4);
  // Phase 2: with forced job completion enabled, stop() must synthesize one
  // extra JobUnsuccessfulCompletionEvent (4 + 1 = 5 handled).
  jheh=new JHEventHandlerForSigtermTest(mockedContext,0);
  Job job=Mockito.mock(Job.class);
  Mockito.when(mockedContext.getJob(jobId)).thenReturn(job);
  ApplicationId mockAppId=Mockito.mock(ApplicationId.class);
  // Fixed literal suffix: uppercase 'L' instead of the easily-misread 'l'.
  Mockito.when(mockAppId.getClusterTimestamp()).thenReturn(1000L);
  Mockito.when(jobId.getAppId()).thenReturn(mockAppId);
  jheh.addToFileMap(jobId);
  jheh.setForcejobCompletion(true);
  for (int i=0; i < numEvents; ++i) {
    events[i]=getEventToEnqueue(jobId);
    jheh.handle(events[i]);
  }
  jheh.stop();
  assertTrue("handleEvent should've been called only 5 times but was " + jheh.eventsHandled,jheh.eventsHandled == 5);
  assertTrue("Last event handled wasn't JobUnsuccessfulCompletionEvent",jheh.lastEventHandled.getHistoryEvent() instanceof JobUnsuccessfulCompletionEvent);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=50000) public void testDefaultFsIsUsedForHistory() throws Exception {
  // Write a core-site.xml whose default FS points at the mini DFS cluster,
  // then set the in-memory default back to file:/// -- history files must
  // still land on the DFS (the configured default), not the local FS.
  Configuration conf=new Configuration();
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,dfsCluster.getURI().toString());
  // try-with-resources: the stream was previously leaked if writeXml threw.
  try (FileOutputStream os=new FileOutputStream(coreSitePath)) {
    conf.writeXml(os);
  }
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,"file:///");
  TestParams t=new TestParams();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR,t.dfsWorkDir);
  JHEvenHandlerForTest realJheh=new JHEvenHandlerForTest(t.mockAppContext,0,false);
  JHEvenHandlerForTest jheh=spy(realJheh);
  jheh.init(conf);
  try {
    jheh.start();
    handleEvent(jheh,new JobHistoryEvent(t.jobId,new AMStartedEvent(t.appAttemptId,200,t.containerId,"nmhost",3000,4000)));
    handleEvent(jheh,new JobHistoryEvent(t.jobId,new JobFinishedEvent(TypeConverter.fromYarn(t.jobId),0,0,0,0,0,new Counters(),new Counters(),new Counters())));
    FileSystem dfsFileSystem=dfsCluster.getFileSystem();
    assertTrue("Minicluster contains some history files",dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
    FileSystem localFileSystem=LocalFileSystem.get(conf);
    assertFalse("No history directory on non-default file system",localFileSystem.exists(new Path(t.dfsWorkDir)));
  }
  finally {
    jheh.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGetHistoryIntermediateDoneDirForUser() throws IOException {
  // With no default-FS override, the per-user intermediate-done dir is the
  // bare configured path plus the user name.
  Configuration conf=new Configuration();
  conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,"/mapred/history/done_intermediate");
  conf.set(MRJobConfig.USER_NAME,System.getProperty("user.name"));
  String pathStr=JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals("/mapred/history/done_intermediate/" + System.getProperty("user.name"),pathStr);
  // Persist a core-site.xml pointing at the DFS cluster; the returned path
  // must then be qualified with the DFS URI even though the in-memory
  // default is reset to file:///.
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,dfsCluster.getURI().toString());
  // try-with-resources: the stream was previously leaked if writeXml threw.
  try (FileOutputStream os=new FileOutputStream(coreSitePath)) {
    conf.writeXml(os);
  }
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,"file:///");
  pathStr=JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  Assert.assertEquals(dfsCluster.getURI().toString() + "/mapred/history/done_intermediate/" + System.getProperty("user.name"),pathStr);
}
InternalCallVerifier BooleanVerifier
@Test public void testEscapeJobSummary(){
  // Carriage returns and newlines in the job name must come back escaped
  // (as literal \r and \n) in the one-line job summary.
  summary.setJobName("aa\rbb\ncc\r\ndd");
  final String summaryLine=summary.getJobSummaryString();
  LOG.info("summary: " + summaryLine);
  Assert.assertTrue(summaryLine.contains("aa\\rbb\\ncc\\r\\ndd"));
  Assert.assertFalse(summaryLine.contains("\r"));
  Assert.assertFalse(summaryLine.contains("\n"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=1000) public void testDataDrivenDBInputFormat() throws Exception {
// With a single map task the format must produce one degenerate split whose
// bounding clauses are the always-true "1=1".
JobContext jobContext=mock(JobContext.class);
Configuration configuration=new Configuration();
configuration.setInt(MRJobConfig.NUM_MAPS,1);
when(jobContext.getConfiguration()).thenReturn(configuration);
DataDrivenDBInputFormat format=new DataDrivenDBInputFormat();
List splits=format.getSplits(jobContext);
assertEquals(1,splits.size());
DataDrivenDBInputSplit split=(DataDrivenDBInputSplit)splits.get(0);
assertEquals("1=1",split.getLowerClause());
assertEquals("1=1",split.getUpperClause());
// setBoundingQuery must store the query under INPUT_BOUNDING_QUERY, both via
// the static Configuration helper and via setInput on a Job.
configuration.setInt(MRJobConfig.NUM_MAPS,2);
DataDrivenDBInputFormat.setBoundingQuery(configuration,"query");
assertEquals("query",configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY));
Job job=mock(Job.class);
when(job.getConfiguration()).thenReturn(configuration);
DataDrivenDBInputFormat.setInput(job,NullDBWritable.class,"query","Bounding Query");
assertEquals("Bounding Query",configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY));
}
InternalCallVerifier EqualityVerifier
/**
 * Test the SQL script generated by OracleDBRecordReader for a bounded split.
 */
@Test(timeout=2000) public void testOracleDBRecordReader() throws Exception {
  DBInputSplit splitter=new DBInputSplit(1,10);
  Configuration configuration=new Configuration();
  Connection connect=DriverForTest.getConnection();
  try {
    DBConfiguration dbConfiguration=new DBConfiguration(configuration);
    dbConfiguration.setInputOrderBy("Order");
    String[] fields={"f1","f2"};
    OracleDBRecordReader recorder=new OracleDBRecordReader(splitter,NullDBWritable.class,configuration,connect,dbConfiguration,"condition",fields,"table");
    // Oracle pagination uses nested ROWNUM filtering for the [1, 10] range.
    assertEquals("SELECT * FROM (SELECT a.*,ROWNUM dbif_rno FROM ( SELECT f1, f2 FROM table WHERE condition ORDER BY Order ) a WHERE rownum <= 10 ) WHERE dbif_rno > 1",recorder.getSelectQuery());
  }
  finally {
    // Close the test connection; it was previously leaked.
    connect.close();
  }
}
InternalCallVerifier EqualityVerifier
@Test(timeout=1000) public void testOracleDataDrivenDBInputFormat() throws Exception {
  OracleDataDrivenDBInputFormat format=new OracleDataDrivenDBInputFormatForTest();
  testCommonSplitterTypes(format);
  // Every date/time-flavoured SQL type maps to the Oracle-specific date splitter.
  for (int sqlType : new int[]{Types.TIMESTAMP, Types.DATE, Types.TIME}) {
    assertEquals(OracleDateSplitter.class, format.getSplitter(sqlType).getClass());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test splitters from DataDrivenDBInputFormat. Different SQL data types may
 * select different splitter implementations.
 */
@Test(timeout=1000) public void testDataDrivenDBInputFormatSplitter(){
  DataDrivenDBInputFormat format=new DataDrivenDBInputFormat();
  testCommonSplitterTypes(format);
  // All date/time-flavoured SQL types resolve to the generic DateSplitter.
  for (int sqlType : new int[]{Types.TIMESTAMP, Types.DATE, Types.TIME}) {
    assertEquals(DateSplitter.class, format.getSplitter(sqlType).getClass());
  }
}
InternalCallVerifier EqualityVerifier
@Test(timeout=2000) public void testBooleanSplitter() throws Exception {
// NOTE: the Mockito stubbing order below matters -- each split() call reads
// the ResultSet columns as currently stubbed.
BooleanSplitter splitter=new BooleanSplitter();
ResultSet result=mock(ResultSet.class);
// Only column 1 stubbed: both booleans default to false -> FALSE/NULL splits.
when(result.getString(1)).thenReturn("result1");
List splits=splitter.split(configuration,result,"column");
assertSplits(new String[]{"column = FALSE column = FALSE","column IS NULL column IS NULL"},splits);
// min=true, max=false is an empty range -> no splits.
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
when(result.getBoolean(1)).thenReturn(true);
when(result.getBoolean(2)).thenReturn(false);
splits=splitter.split(configuration,result,"column");
assertEquals(0,splits.size());
// min=false, max=true covers both values -> FALSE and TRUE splits.
when(result.getString(1)).thenReturn("result1");
when(result.getString(2)).thenReturn("result2");
when(result.getBoolean(1)).thenReturn(false);
when(result.getBoolean(2)).thenReturn(true);
splits=splitter.split(configuration,result,"column");
assertSplits(new String[]{"column = FALSE column = FALSE",".*column = TRUE"},splits);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that directories do not get included as part of getSplits().
 */
@Test public void testGetSplitsWithDirectory() throws Exception {
  MiniDFSCluster dfs=null;
  try {
    Configuration conf=new Configuration();
    // Build the mini cluster exactly once: the original built it twice in a
    // row, leaking the first instance (it was never shut down).
    dfs=new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1).build();
    dfs.waitActive();
    FileSystem fileSys=dfs.getFileSystem();
    // Layout: /dir1 holds one empty file plus a sub-directory.
    Path dir1=new Path("/dir1");
    Path file=new Path("/dir1/file1");
    Path dir2=new Path("/dir1/dir2");
    if (!fileSys.mkdirs(dir1)) {
      throw new IOException("Mkdirs failed to create " + dir1.toString());
    }
    FSDataOutputStream out=fileSys.create(file);
    out.write(new byte[0]);
    out.close();
    if (!fileSys.mkdirs(dir2)) {
      throw new IOException("Mkdirs failed to create " + dir2.toString());
    }
    DummyInputFormat inFormat=new DummyInputFormat();
    Job job=Job.getInstance(conf);
    FileInputFormat.setInputPaths(job,"/dir1");
    List splits=inFormat.getSplits(job);
    // Only the file may contribute to the split; /dir1/dir2 must be skipped.
    assertEquals(1,splits.size());
    CombineFileSplit fileSplit=(CombineFileSplit)splits.get(0);
    assertEquals(1,fileSplit.getNumPaths());
    assertEquals(file.getName(),fileSplit.getPath(0).getName());
    assertEquals(0,fileSplit.getOffset(0));
    assertEquals(0,fileSplit.getLength(0));
  }
  finally {
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Test when the input file's length is 0.
*/
@Test public void testForEmptyFile() throws Exception {
Configuration conf=new Configuration();
FileSystem fileSys=FileSystem.get(conf);
// Create a zero-length file under "test/".
Path file=new Path("test" + "/file");
FSDataOutputStream out=fileSys.create(file,true,conf.getInt("io.file.buffer.size",4096),(short)1,(long)BLOCKSIZE);
out.write(new byte[0]);
out.close();
DummyInputFormat inFormat=new DummyInputFormat();
Job job=Job.getInstance(conf);
FileInputFormat.setInputPaths(job,"test");
List splits=inFormat.getSplits(job);
// An empty file still yields one split covering it, with zero offset/length.
assertEquals(1,splits.size());
CombineFileSplit fileSplit=(CombineFileSplit)splits.get(0);
assertEquals(1,fileSplit.getNumPaths());
assertEquals(file.getName(),fileSplit.getPath(0).getName());
assertEquals(0,fileSplit.getOffset(0));
assertEquals(0,fileSplit.getLength(0));
// Clean up the "test" directory created above.
fileSys.delete(file.getParent(),true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test when input files are from non-default file systems.
 */
@Test public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf=new Configuration();
  // Point the default FS at a dummy URI so the local input path below is
  // explicitly non-default.
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,DUMMY_FS_URI);
  assertEquals(DUMMY_FS_URI,FileSystem.getDefaultUri(conf).toString());
  Path localPath=new Path("testFile1");
  FileSystem lfs=FileSystem.getLocal(conf);
  FSDataOutputStream dos=lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();
  Job job=Job.getInstance(conf);
  FileInputFormat.setInputPaths(job,lfs.makeQualified(localPath));
  DummyInputFormat inFormat=new DummyInputFormat();
  List splits=inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for ( InputSplit s : splits) {
    CombineFileSplit cfs=(CombineFileSplit)s;
    for ( Path p : cfs.getPaths()) {
      // Every split path must resolve against the local FS, not the dummy
      // default. JUnit convention: expected first (was reversed).
      assertEquals("file",p.toUri().getScheme());
    }
  }
}
InternalCallVerifier BooleanVerifier
@SuppressWarnings("unchecked") @Test public void testProgressIsReportedIfInputASeriesOfEmptyFiles() throws IOException, InterruptedException {
// Three empty files combined into one split: the record reader must still
// report progress once per file even though there are no records.
JobConf conf=new JobConf();
Path[] paths=new Path[3];
File[] files=new File[3];
long[] fileLength=new long[3];
try {
for (int i=0; i < 3; i++) {
File dir=new File(outDir.toString());
dir.mkdir();
files[i]=new File(dir,"testfile" + i);
FileWriter fileWriter=new FileWriter(files[i]);
fileWriter.flush();
fileWriter.close();
// Declared lengths 0,1,2 -- the files themselves are empty.
fileLength[i]=i;
paths[i]=new Path(outDir + "/testfile" + i);
}
CombineFileSplit combineFileSplit=new CombineFileSplit(paths,fileLength);
TaskAttemptID taskAttemptID=Mockito.mock(TaskAttemptID.class);
TaskReporter reporter=Mockito.mock(TaskReporter.class);
TaskAttemptContextImpl taskAttemptContext=new TaskAttemptContextImpl(conf,taskAttemptID,reporter);
CombineFileRecordReader cfrr=new CombineFileRecordReader(combineFileSplit,taskAttemptContext,TextRecordReaderWrapper.class);
cfrr.initialize(combineFileSplit,taskAttemptContext);
// One progress() on initialize, then one per empty file while advancing.
verify(reporter).progress();
Assert.assertFalse(cfrr.nextKeyValue());
verify(reporter,times(3)).progress();
}
finally {
FileUtil.fullyDelete(new File(outDir.toString()));
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testFormat() throws IOException, InterruptedException {
// Write several random sequence files, then verify CombineSequenceFileInputFormat
// combines them into a single split that covers every key exactly once.
Job job=Job.getInstance(conf);
Random random=new Random();
long seed=random.nextLong();
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random,job);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
InputFormat format=new CombineSequenceFileInputFormat();
for (int i=0; i < 3; i++) {
// NOTE(review): numSplits is computed and logged but never passed to
// getSplits -- it only documents the requested value.
int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// Track which keys were seen; each must appear in exactly one partition.
BitSet bits=new BitSet(length);
RecordReader reader=format.createRecordReader(split,context);
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
try {
while (reader.nextKeyValue()) {
IntWritable key=reader.getCurrentKey();
BytesWritable value=reader.getCurrentValue();
assertNotNull("Value should not be null.",value);
final int k=key.get();
LOG.debug("read " + k);
assertFalse("Key in multiple partitions.",bits.get(k));
bits.set(k);
}
}
finally {
reader.close();
}
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testFormat() throws Exception {
// Write several random text files, then verify CombineTextInputFormat
// produces one combined split that yields every line exactly once.
Job job=Job.getInstance(new Configuration(defaultConf));
Random random=new Random();
long seed=random.nextLong();
LOG.info("seed = " + seed);
random.setSeed(seed);
localFs.delete(workDir,true);
FileInputFormat.setInputPaths(job,workDir);
final int length=10000;
final int numFiles=10;
createFiles(length,numFiles,random);
CombineTextInputFormat format=new CombineTextInputFormat();
for (int i=0; i < 3; i++) {
// NOTE(review): numSplits is logged but not passed to getSplits; it only
// documents the requested value.
int numSplits=random.nextInt(length / 20) + 1;
LOG.info("splitting: requesting = " + numSplits);
List splits=format.getSplits(job);
LOG.info("splitting: got = " + splits.size());
assertEquals("We got more than one splits!",1,splits.size());
InputSplit split=splits.get(0);
assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass());
// Track which values were seen; each must appear in exactly one partition.
BitSet bits=new BitSet(length);
LOG.debug("split= " + split);
TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
RecordReader reader=format.createRecordReader(split,context);
assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass());
MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split);
reader.initialize(split,mcontext);
try {
int count=0;
while (reader.nextKeyValue()) {
LongWritable key=reader.getCurrentKey();
assertNotNull("Key should not be null.",key);
Text value=reader.getCurrentValue();
final int v=Integer.parseInt(value.toString());
LOG.debug("read " + v);
assertFalse("Key in multiple partitions.",bits.get(v));
bits.set(v);
count++;
}
LOG.debug("split=" + split + " count="+ count);
}
finally {
reader.close();
}
assertEquals("Some keys in no partition.",length,bits.cardinality());
}
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
* Test using the gzip codec for reading
*/
@Test(timeout=10000) public void testGzip() throws IOException, InterruptedException {
Configuration conf=new Configuration(defaultConf);
CompressionCodec gzip=new GzipCodec();
ReflectionUtils.setConf(gzip,conf);
localFs.delete(workDir,true);
// Two gzipped input files combined into a single split.
writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n");
writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n");
Job job=Job.getInstance(conf);
FileInputFormat.setInputPaths(job,workDir);
CombineTextInputFormat format=new CombineTextInputFormat();
List splits=format.getSplits(job);
assertEquals("compressed splits == 1",1,splits.size());
List results=readSplit(format,splits.get(0),job);
// 6 lines from part1 + 2 lines from part2.
assertEquals("splits[0] length",8,results.size());
final String[] firstList={"the quick","brown","fox jumped","over"," the lazy"," dog"};
final String[] secondList={"this is a test","of gzip"};
String first=results.get(0).toString();
// File ordering inside the combined split is not guaranteed, so accept
// either file appearing first.
if (first.equals(firstList[0])) {
testResults(results,firstList,secondList);
}
else if (first.equals(secondList[0])) {
testResults(results,secondList,firstList);
}
else {
fail("unexpected first token!");
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testNumInputFilesWithoutRecursively() throws Exception {
Configuration conf=getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
Job job=Job.getInstance(conf);
FileInputFormat,?> fileInputFormat=new TextInputFormat();
List splits=fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct",2,splits.size());
verifySplits(Lists.newArrayList("test:/a1/a2","test:/a1/file1"),splits);
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testListStatusErrorOnNonExistantDir() throws IOException {
Configuration conf=new Configuration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
configureTestErrorOnNonExistantDir(conf,localFs);
Job job=Job.getInstance(conf);
FileInputFormat,?> fif=new TextInputFormat();
try {
fif.listStatus(job);
Assert.fail("Expecting an IOException for a missing Input path");
}
catch ( IOException e) {
Path expectedExceptionPath=new Path(TEST_ROOT_DIR,"input2");
expectedExceptionPath=localFs.makeQualified(expectedExceptionPath);
Assert.assertTrue(e instanceof InvalidInputException);
Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(),e.getMessage());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testNumInputFilesRecursively() throws Exception {
Configuration conf=getConfiguration();
conf.set(FileInputFormat.INPUT_DIR_RECURSIVE,"true");
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
Job job=Job.getInstance(conf);
FileInputFormat,?> fileInputFormat=new TextInputFormat();
List splits=fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct",3,splits.size());
verifySplits(Lists.newArrayList("test:/a1/a2/file2","test:/a1/a2/file3","test:/a1/file1"),splits);
conf=getConfiguration();
conf.set("mapred.input.dir.recursive","true");
job=Job.getInstance(conf);
splits=fileInputFormat.getSplits(job);
verifySplits(Lists.newArrayList("test:/a1/a2/file2","test:/a1/a2/file3","test:/a1/file1"),splits);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testListLocatedStatus() throws Exception {
Configuration conf=getConfiguration();
conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads);
conf.setBoolean("fs.test.impl.disable.cache",false);
conf.set(FileInputFormat.INPUT_DIR,"test:///a1/a2");
MockFileSystem mockFs=(MockFileSystem)new Path("test:///").getFileSystem(conf);
Assert.assertEquals("listLocatedStatus already called",0,mockFs.numListLocatedStatusCalls);
Job job=Job.getInstance(conf);
FileInputFormat,?> fileInputFormat=new TextInputFormat();
List splits=fileInputFormat.getSplits(job);
Assert.assertEquals("Input splits are not correct",2,splits.size());
Assert.assertEquals("listLocatedStatuss calls",1,mockFs.numListLocatedStatusCalls);
FileSystem.closeAll();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testSplitLocationInfo() throws Exception {
  // Verifies the per-location flags: "localhost" is on disk and in memory,
  // "otherhost" is on disk only.
  Configuration conf = getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      "test:///a1/a2");
  Job job = Job.getInstance(conf);
  TextInputFormat fileInputFormat = new TextInputFormat();
  // Restored generics: raw List would make splits.get(0) return Object.
  List<InputSplit> splits = fileInputFormat.getSplits(job);
  String[] locations = splits.get(0).getLocations();
  Assert.assertEquals(2, locations.length);
  SplitLocationInfo[] locationInfo = splits.get(0).getLocationInfo();
  Assert.assertEquals(2, locationInfo.length);
  // getLocations() order is unspecified; pick the matching info entries.
  SplitLocationInfo localhostInfo =
      locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1];
  SplitLocationInfo otherhostInfo =
      locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1];
  Assert.assertTrue(localhostInfo.isOnDisk());
  Assert.assertTrue(localhostInfo.isInMemory());
  Assert.assertTrue(otherhostInfo.isOnDisk());
  Assert.assertFalse(otherhostInfo.isInMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec with two input files.
 */
@Test(timeout=5000)
public void testGzipWithTwoInputs() throws Exception {
  CompressionCodec gzip = new GzipCodec();
  localFs.delete(workDir, true);
  Job job = Job.getInstance(defaultConf);
  // Fixed-length records of 5 bytes each.
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), 5);
  ReflectionUtils.setConf(gzip, job.getConfiguration());
  FileInputFormat.setInputPaths(job, workDir);
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "one two threefour five six seveneightnine ten ");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "ten nine eightsevensix five four threetwo one ");
  // Restored generics stripped in the extracted source (raw List).
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  // Split order is not guaranteed; make splits[0] the one for part1.
  FileSplit tmp = (FileSplit) splits.get(0);
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, tmp);
  }
  List<String> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 10, results.size());
  assertEquals("splits[0][5]", "six ", results.get(5));
  results = readSplit(format, splits.get(1), job);
  assertEquals("splits[1] length", 10, results.size());
  assertEquals("splits[1][0]", "ten ", results.get(0));
  assertEquals("splits[1][1]", "nine ", results.get(1));
}
InternalCallVerifier EqualityVerifier
/**
 * Test the case when a custom record delimiter is specified using the
 * textinputformat.record.delimiter configuration property
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test
public void testCustomRecordDelimiters()
    throws IOException, InterruptedException, ClassNotFoundException {
  // Run a small job whose records are terminated by "\t\n" and compare the
  // job output against the expected text.
  final Configuration jobConf = new Configuration();
  jobConf.set("textinputformat.record.delimiter", "\t\n");
  FileSystem fs = FileSystem.getLocal(jobConf);
  fs.delete(workDir, true);
  createInputFile(jobConf);
  createAndRunJob(jobConf);
  assertEquals("0\tabc\ndef\n9\tghi\njkl\n", readOutputFile(jobConf));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
@Test
@SuppressWarnings({"rawtypes","unchecked"})
public void testLastInputSplitAtSplitBoundary() throws Exception {
  // 1 GB file with 128 MB split size: exactly 8 splits, no remainder.
  FileInputFormat fif =
      new FileInputFormatForTest(1024l * 1024 * 1024, 128l * 1024 * 1024);
  Configuration conf = new Configuration();
  JobContext jobContext = mock(JobContext.class);
  when(jobContext.getConfiguration()).thenReturn(conf);
  // Restored generics: raw List would make splits.get(i) return Object.
  List<InputSplit> splits = fif.getSplits(jobContext);
  assertEquals(8, splits.size());
  for (int i = 0; i < splits.size(); i++) {
    InputSplit split = splits.get(i);
    assertEquals(("host" + i), split.getLocations()[0]);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
@Test
@SuppressWarnings({"rawtypes","unchecked"})
public void testLastInputSplitExceedingSplitBoundary() throws Exception {
  // 1027 MB file with 128 MB split size: the 3 MB tail folds into the last
  // of the 8 splits instead of creating a ninth.
  FileInputFormat fif =
      new FileInputFormatForTest(1027l * 1024 * 1024, 128l * 1024 * 1024);
  Configuration conf = new Configuration();
  JobContext jobContext = mock(JobContext.class);
  when(jobContext.getConfiguration()).thenReturn(conf);
  // Restored generics: raw List would make splits.get(i) return Object.
  List<InputSplit> splits = fif.getSplits(jobContext);
  assertEquals(8, splits.size());
  for (int i = 0; i < splits.size(); i++) {
    InputSplit split = splits.get(i);
    assertEquals(("host" + i), split.getLocations()[0]);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test when the input file's length is 0.
 */
@Test
public void testForEmptyFile() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fileSys = FileSystem.get(conf);
  Path file = new Path("test" + "/file");
  FSDataOutputStream out = fileSys.create(file, true,
      conf.getInt("io.file.buffer.size", 4096), (short) 1, (long) 1024);
  out.write(new byte[0]);
  out.close();

  // An empty file still yields exactly one zero-length, location-less split.
  DummyInputFormat inFormat = new DummyInputFormat();
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, "test");
  // Restored generics stripped in the extracted source (raw List).
  List<InputSplit> splits = inFormat.getSplits(job);
  assertEquals(1, splits.size());
  FileSplit fileSplit = (FileSplit) splits.get(0);
  assertEquals(0, fileSplit.getLocations().length);
  assertEquals(file.getName(), fileSplit.getPath().getName());
  assertEquals(0, fileSplit.getStart());
  assertEquals(0, fileSplit.getLength());

  fileSys.delete(file.getParent(), true);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testAddInputPath() throws IOException {
  // With an s3 default filesystem, input paths given with an explicit
  // file: scheme must keep that scheme through add/set/getInputPaths.
  final Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "s3://abc:xyz@hostname/");
  final Job job = Job.getInstance(conf);
  job.getConfiguration().set("fs.defaultFS", "s3://abc:xyz@hostname/");
  final FileSystem defaultfs = FileSystem.get(conf);
  System.out.println("defaultfs.getUri() = " + defaultfs.getUri());
  {
    // addInputPath keeps the path's own scheme.
    final Path input = new Path("file:/foo");
    System.out.println("original = " + input);
    FileInputFormat.addInputPath(job, input);
    final Path[] paths = FileInputFormat.getInputPaths(job);
    System.out.println("results = " + Arrays.asList(paths));
    assertEquals(1, paths.length);
    assertEquals(input, paths[0]);
  }
  {
    // setInputPaths likewise preserves the explicit scheme.
    final Path input = new Path("file:/bar");
    System.out.println("original = " + input);
    FileInputFormat.setInputPaths(job, input);
    final Path[] paths = FileInputFormat.getInputPaths(job);
    System.out.println("results = " + Arrays.asList(paths));
    assertEquals(1, paths.length);
    assertEquals(input, paths[0]);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
@Test
@SuppressWarnings({"rawtypes","unchecked"})
public void testLastInputSplitSingleSplit() throws Exception {
  // 100 MB file with 128 MB split size: everything fits in one split.
  FileInputFormat fif =
      new FileInputFormatForTest(100l * 1024 * 1024, 128l * 1024 * 1024);
  Configuration conf = new Configuration();
  JobContext jobContext = mock(JobContext.class);
  when(jobContext.getConfiguration()).thenReturn(conf);
  // Restored generics: raw List would make splits.get(i) return Object.
  List<InputSplit> splits = fif.getSplits(jobContext);
  assertEquals(1, splits.size());
  for (int i = 0; i < splits.size(); i++) {
    InputSplit split = splits.get(i);
    assertEquals(("host" + i), split.getLocations()[0]);
  }
}
InternalCallVerifier EqualityVerifier
@Test
public void testNewLines() throws Exception {
  // "a\nbb\n\nccc\rdddd\r\neeeee": \n, \r and \r\n all terminate a line,
  // and the empty third line must come back with length 0.
  LineReader in = makeStream("a\nbb\n\nccc\rdddd\r\neeeee");
  Text out = new Text();
  in.readLine(out);
  assertEquals("line1 length", 1, out.getLength());
  in.readLine(out);
  assertEquals("line2 length", 2, out.getLength());
  in.readLine(out);
  assertEquals("line3 length", 0, out.getLength());
  in.readLine(out);
  assertEquals("line4 length", 3, out.getLength());
  in.readLine(out);
  assertEquals("line5 length", 4, out.getLength());
  in.readLine(out);
  // Fixed copy/paste: this asserts the sixth line ("eeeee"), not the fifth.
  assertEquals("line6 length", 5, out.getLength());
  assertEquals("end of file", 0, in.readLine(out));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * BZip2 is a splittable codec: KeyValueTextInputFormat must be able to
 * split a compressed file and still deliver every record exactly once.
 */
@Test
public void testSplitableCodecs() throws Exception {
  final Job job = Job.getInstance(defaultConf);
  final Configuration conf = job.getConfiguration();
  CompressionCodec codec = null;
  try {
    codec = (CompressionCodec) ReflectionUtils.newInstance(
        conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"), conf);
  } catch (ClassNotFoundException cnfe) {
    throw new IOException("Illegal codec!");
  }
  Path file = new Path(workDir, "test" + codec.getDefaultExtension());

  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);

  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);
  final int MAX_LENGTH = 500000;
  FileInputFormat.setMaxInputSplitSize(job, MAX_LENGTH / 20);

  // Try compressed files of increasing random lengths.
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 4) + 1) {
    LOG.info("creating; entries = " + length);

    // Each record is "2i\t i" so key parity and key/value linkage can be
    // checked after the split is read back.
    Writer writer = new OutputStreamWriter(
        codec.createOutputStream(localFs.create(file)));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }

    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    assertTrue("KVTIF claims not splittable", format.isSplitable(job, file));
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 2000) + 1;
      LOG.info("splitting: requesting = " + numSplits);
      // Restored generics (the extracted source had raw "List"/"Class>");
      // also dropped an unused "clazz" local.
      List<InputSplit> splits = format.getSplits(job);
      LOG.info("splitting: got = " + splits.size());

      // Every value must land in exactly one partition across all splits.
      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= " + splits.get(j));
        TaskAttemptContext context =
            MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader =
            format.createRecordReader(splits.get(j), context);
        MapContext<Text, Text, Text, Text> mcontext =
            new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
                context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);
        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            value = reader.getCurrentValue();
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + k + "," + v);
            assertFalse(k + "," + v + " in multiple partitions.", bits.get(v));
            bits.set(v);
            count++;
          }
          if (count > 0) {
            LOG.info("splits[" + j + "]=" + splits.get(j) + " count=" + count);
          } else {
            LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
          }
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test using the gzip codec for reading
 */
@Test
public void testGzip() throws IOException, InterruptedException {
  Configuration conf = new Configuration(defaultConf);
  CompressionCodec gzip = new GzipCodec();
  ReflectionUtils.setConf(gzip, conf);
  localFs.delete(workDir, true);
  // Each line is "key\tvalue"; only the values are asserted below.
  writeFile(localFs, new Path(workDir, "part1.txt.gz"), gzip,
      "line-1\tthe quick\nline-2\tbrown\nline-3\t"
      + "fox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n");
  writeFile(localFs, new Path(workDir, "part2.txt.gz"), gzip,
      "line-1\tthis is a test\nline-1\tof gzip\n");
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, workDir);
  KeyValueTextInputFormat format = new KeyValueTextInputFormat();
  // Restored generics stripped in the extracted source (raw List).
  List<InputSplit> splits = format.getSplits(job);
  assertEquals("compressed splits == 2", 2, splits.size());
  // Normalize split order so splits[0] always refers to part1.
  FileSplit tmp = (FileSplit) splits.get(0);
  if (tmp.getPath().getName().equals("part2.txt.gz")) {
    splits.set(0, splits.get(1));
    splits.set(1, tmp);
  }
  List<Text> results = readSplit(format, splits.get(0), job);
  assertEquals("splits[0] length", 6, results.size());
  assertEquals("splits[0][0]", "the quick", results.get(0).toString());
  assertEquals("splits[0][1]", "brown", results.get(1).toString());
  assertEquals("splits[0][2]", "fox jumped", results.get(2).toString());
  assertEquals("splits[0][3]", "over", results.get(3).toString());
  assertEquals("splits[0][4]", " the lazy", results.get(4).toString());
  assertEquals("splits[0][5]", " dog", results.get(5).toString());
  results = readSplit(format, splits.get(1), job);
  assertEquals("splits[1] length", 2, results.size());
  assertEquals("splits[1][0]", "this is a test", results.get(0).toString());
  assertEquals("splits[1][1]", "of gzip", results.get(1).toString());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises KeyValueTextInputFormat over many random file lengths and split
 * counts, verifying reader/key/value classes and exactly-once delivery.
 */
@Test
public void testFormat() throws Exception {
  Job job = Job.getInstance(new Configuration(defaultConf));
  Path file = new Path(workDir, "test.txt");

  int seed = new Random().nextInt();
  LOG.info("seed = " + seed);
  Random random = new Random(seed);

  localFs.delete(workDir, true);
  FileInputFormat.setInputPaths(job, workDir);

  final int MAX_LENGTH = 10000;
  // Try files of increasing random lengths.
  for (int length = 0; length < MAX_LENGTH;
       length += random.nextInt(MAX_LENGTH / 10) + 1) {
    LOG.debug("creating; entries = " + length);

    // Records are "2i\t i" so key parity and key/value linkage can be checked.
    Writer writer = new OutputStreamWriter(localFs.create(file));
    try {
      for (int i = 0; i < length; i++) {
        writer.write(Integer.toString(i * 2));
        writer.write("\t");
        writer.write(Integer.toString(i));
        writer.write("\n");
      }
    } finally {
      writer.close();
    }

    KeyValueTextInputFormat format = new KeyValueTextInputFormat();
    for (int i = 0; i < 3; i++) {
      int numSplits = random.nextInt(MAX_LENGTH / 20) + 1;
      LOG.debug("splitting: requesting = " + numSplits);
      // Restored generics (the extracted source had raw "List"/"Class>").
      List<InputSplit> splits = format.getSplits(job);
      LOG.debug("splitting: got = " + splits.size());

      BitSet bits = new BitSet(length);
      for (int j = 0; j < splits.size(); j++) {
        LOG.debug("split[" + j + "]= " + splits.get(j));
        TaskAttemptContext context =
            MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
        RecordReader<Text, Text> reader =
            format.createRecordReader(splits.get(j), context);
        Class<?> clazz = reader.getClass();
        assertEquals("reader class is KeyValueLineRecordReader.",
            KeyValueLineRecordReader.class, clazz);
        MapContext<Text, Text, Text, Text> mcontext =
            new MapContextImpl<Text, Text, Text, Text>(job.getConfiguration(),
                context.getTaskAttemptID(), reader, null, null,
                MapReduceTestUtil.createDummyReporter(), splits.get(j));
        reader.initialize(splits.get(j), mcontext);

        Text key = null;
        Text value = null;
        try {
          int count = 0;
          while (reader.nextKeyValue()) {
            key = reader.getCurrentKey();
            clazz = key.getClass();
            assertEquals("Key class is Text.", Text.class, clazz);
            value = reader.getCurrentValue();
            clazz = value.getClass();
            assertEquals("Value class is Text.", Text.class, clazz);
            final int k = Integer.parseInt(key.toString());
            final int v = Integer.parseInt(value.toString());
            assertEquals("Bad key", 0, k % 2);
            assertEquals("Mismatched key/value", k / 2, v);
            LOG.debug("read " + v);
            assertFalse("Key in multiple partitions.", bits.get(v));
            bits.set(v);
            count++;
          }
          LOG.debug("splits[" + j + "]=" + splits.get(j) + " count=" + count);
        } finally {
          reader.close();
        }
      }
      assertEquals("Some keys in no partition.", length, bits.cardinality());
    }
  }
}
InternalCallVerifier EqualityVerifier
@Test
public void testUTF8() throws Exception {
  // Multi-byte UTF-8 text must round-trip through readLine unchanged.
  LineReader reader = makeStream("abcd\u20acbdcd\u20ac");
  Text line = new Text();
  reader.readLine(line);
  assertEquals("readLine changed utf8 characters",
      "abcd\u20acbdcd\u20ac", line.toString());
  // U+200A is a unicode space, not a newline; it must not split the line.
  reader = makeStream("abc\u200axyz");
  reader.readLine(line);
  assertEquals("split on fake newline", "abc\u200axyz", line.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test
public void testDoMultipleInputs() throws IOException {
  // Two input dirs with different input formats/mappers feeding one reducer.
  Path in1Dir = getDir(IN1_DIR);
  Path in2Dir = getDir(IN2_DIR);
  Path outDir = getDir(OUT_DIR);
  Configuration conf = createJobConf();
  FileSystem fs = FileSystem.get(conf);
  fs.delete(outDir, true);

  DataOutputStream file1 = fs.create(new Path(in1Dir, "part-0"));
  file1.writeBytes("a\nb\nc\nd\ne");
  file1.close();
  DataOutputStream file2 = fs.create(new Path(in2Dir, "part-0"));
  file2.writeBytes("a\tblah\nb\tblah\nc\tblah\nd\tblah\ne\tblah");
  file2.close();

  Job job = Job.getInstance(conf);
  job.setJobName("mi");
  MultipleInputs.addInputPath(job, in1Dir, TextInputFormat.class,
      MapClass.class);
  MultipleInputs.addInputPath(job, in2Dir, KeyValueTextInputFormat.class,
      KeyValueMapClass.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(Text.class);
  job.setReducerClass(ReducerClass.class);
  FileOutputFormat.setOutputPath(job, outDir);

  boolean success = false;
  try {
    success = job.waitForCompletion(true);
  } catch (InterruptedException ie) {
    throw new RuntimeException(ie);
  } catch (ClassNotFoundException instante) {
    throw new RuntimeException(instante);
  }
  if (!success) throw new RuntimeException("Job failed!");

  // Every key appears once in each input, so each reduced count is 2.
  BufferedReader output = new BufferedReader(
      new InputStreamReader(fs.open(new Path(outDir, "part-r-00000"))));
  try {
    // assertEquals (not assertTrue(equals)) gives a useful failure message
    // and avoids an NPE if a line is missing.
    assertEquals("a 2", output.readLine());
    assertEquals("b 2", output.readLine());
    assertEquals("c 2", output.readLine());
    assertEquals("d 2", output.readLine());
    assertEquals("e 2", output.readLine());
  } finally {
    // fix: the reader was previously never closed.
    output.close();
  }
}
InternalCallVerifier BooleanVerifier
@Test
public void testAddingDependingJobToCompletedJobFails() throws Exception {
  // Dependencies may not be added once a job has already succeeded.
  Configuration conf = new Configuration();
  ControlledJob completed = new ControlledJob(conf);
  completed.setJobState(ControlledJob.State.SUCCESS);
  ControlledJob dependent = new ControlledJob(conf);
  assertFalse(completed.addDependingJob(dependent));
}
InternalCallVerifier BooleanVerifier
@Test
public void testAddingDependingJobToRunningJobFails() throws Exception {
  // Dependencies may not be added once a job is already running.
  Configuration conf = new Configuration();
  ControlledJob running = new ControlledJob(conf);
  running.setJobState(ControlledJob.State.RUNNING);
  ControlledJob dependent = new ControlledJob(conf);
  assertFalse(running.addDependingJob(dependent));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000)
public void testControlledJob() throws Exception {
  LOG.info("Starting testControlledJob");
  Configuration conf = createJobConf();
  cleanupData(conf);
  Job job1 = MapReduceTestUtil.createCopyJob(conf, outdir_1, indir);
  JobControl theControl = createDependencies(conf, job1);
  // Poll until the first controlled job actually starts running.
  while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      // fix: restore the interrupt status instead of swallowing it,
      // then stop waiting.
      Thread.currentThread().interrupt();
      break;
    }
  }
  // A running job must have been assigned a mapred job id.
  Assert.assertNotNull(cjob1.getMapredJobId());
  waitTillAllFinished(theControl);
  assertEquals("Some jobs failed", 0, theControl.getFailedJobList().size());
  theControl.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testSuccessfulJobs() throws Exception {
  // Dependency graph: job3 depends on job1 and job2; job4 depends on job3.
  JobControl control = new JobControl("Test");
  ControlledJob job1 = createSuccessfulControlledJob(control);
  ControlledJob job2 = createSuccessfulControlledJob(control);
  ControlledJob job3 = createSuccessfulControlledJob(control, job1, job2);
  ControlledJob job4 = createSuccessfulControlledJob(control, job3);
  runJobControl(control);
  assertEquals("Success list", 4, control.getSuccessfulJobList().size());
  assertEquals("Failed list", 0, control.getFailedJobList().size());
  // Every job in the chain must have finished in the SUCCESS state.
  for (ControlledJob job : new ControlledJob[]{job1, job2, job3, job4}) {
    assertTrue(job.getJobState() == ControlledJob.State.SUCCESS);
  }
  control.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testErrorWhileSubmitting() throws Exception {
  // A job whose submit() throws must end up on the failed list.
  JobControl control = new JobControl("Test");
  Job mockJob = mock(Job.class);
  ControlledJob failing = new ControlledJob(mockJob, null);
  when(mockJob.getConfiguration()).thenReturn(new Configuration());
  doThrow(new IncompatibleClassChangeError("This is a test"))
      .when(mockJob).submit();
  control.addJob(failing);
  runJobControl(control);
  try {
    assertEquals("Success list", 0, control.getSuccessfulJobList().size());
    assertEquals("Failed list", 1, control.getFailedJobList().size());
    assertTrue(failing.getJobState() == ControlledJob.State.FAILED);
  } finally {
    control.stop();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test
public void testFailedJob() throws Exception {
  // job1 fails; job3 depends on it and job4 depends on job3, so both
  // become DEPENDENT_FAILED while the independent job2 still succeeds.
  JobControl control = new JobControl("Test");
  ControlledJob job1 = createFailedControlledJob(control);
  ControlledJob job2 = createSuccessfulControlledJob(control);
  ControlledJob job3 = createSuccessfulControlledJob(control, job1, job2);
  ControlledJob job4 = createSuccessfulControlledJob(control, job3);
  runJobControl(control);
  assertEquals("Success list", 1, control.getSuccessfulJobList().size());
  assertEquals("Failed list", 3, control.getFailedJobList().size());
  assertTrue(job1.getJobState() == ControlledJob.State.FAILED);
  assertTrue(job2.getJobState() == ControlledJob.State.SUCCESS);
  assertTrue(job3.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  assertTrue(job4.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
  control.stop();
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test
public void testBinaryTokenFile() throws IOException {
  Configuration clusterConf = mrCluster.getConfig();
  // Register the namenode URI (listed twice, comma separated).
  final String nnUri = dfsCluster.getURI(0).toString();
  clusterConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
  final String[] sleepJobArgs = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int exitCode = -1;
  try {
    exitCode = ToolRunner.run(clusterConf, new MySleepJob(), sleepJobArgs);
  } catch (Exception e) {
    System.out.println("Job failed with " + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0:", 0, exitCode);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test
public void test() throws IOException {
  Configuration jobConf = new JobConf(mrCluster.getConfig());
  NameNode nn = dfsCluster.getNameNode();
  URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
  jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());
  jobConf.set("mapreduce.job.credentials.json", "keys.json");
  String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
  int res = -1;
  try {
    res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with" + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  // fix: JUnit's assertEquals takes (message, expected, actual) — the
  // original passed them as (message, actual, expected).
  assertEquals("dist job res is not 0", 0, res);
}
APIUtilityVerifier InternalCallVerifier NullVerifier
@SuppressWarnings("deprecation")
@Test
public void testGetTokensForNamenodes() throws IOException, URISyntaxException {
  // A token written to the binary credentials file must be resolvable for
  // the filesystem's canonical service name after obtainTokens...Internal.
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  String binaryTokenFile = FileSystem.getLocal(conf)
      .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri().getPath();
  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  Credentials creds = new Credentials();
  // Restored the garbled wildcard generics ("Token>").
  Token<?> token1 = fs1.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);

  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  String fs_addr = fs1.getCanonicalServiceName();
  Token<?> nnt = TokenCache.getDelegationToken(creds, fs_addr);
  assertNotNull("Token for nn is null", nnt);
}
InternalCallVerifier NullVerifier
@Test
public void testCleanUpTokenReferral() throws Exception {
  // cleanUpTokenReferral must remove the binary-credentials path key.
  Configuration config = new Configuration();
  config.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, "foo");
  TokenCache.cleanUpTokenReferral(config);
  assertNull(config.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY));
}
InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
@Test
@SuppressWarnings("deprecation")
public void testBinaryCredentials() throws Exception {
  // Tokens loaded from the binary token file must not clobber newer tokens
  // already present, and repeated obtain calls must be idempotent.
  Path TEST_ROOT_DIR =
      new Path(System.getProperty("test.build.data", "test/build/data"));
  String binaryTokenFile = FileSystem.getLocal(conf)
      .makeQualified(new Path(TEST_ROOT_DIR, "tokenFile")).toUri().getPath();

  MockFileSystem fs1 = createFileSystemForServiceName("service1");
  MockFileSystem fs2 = createFileSystemForServiceName("service2");
  MockFileSystem fs3 = createFileSystemForServiceName("service3");

  // Write tokens for service1 and service2 into the binary file.
  // (Restored the garbled wildcard generics: "Token>" -> "Token<?>".)
  Credentials creds = new Credentials();
  Token<?> token1 = fs1.getDelegationToken(renewer);
  Token<?> token2 = fs2.getDelegationToken(renewer);
  creds.addToken(token1.getService(), token1);
  creds.addToken(token2.getService(), token2);
  conf.set(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY, binaryTokenFile);
  creds.writeTokenStorageFile(new Path(binaryTokenFile), conf);

  // Start fresh with a newer token for service1 already in place.
  creds = new Credentials();
  Token<?> newerToken1 = fs1.getDelegationToken(renewer);
  assertNotSame(newerToken1, token1);
  creds.addToken(newerToken1.getService(), newerToken1);
  checkToken(creds, newerToken1);

  // fs1's newer token is kept; fs2's token comes from the binary file.
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  checkToken(creds, newerToken1, token2);
  TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
  checkToken(creds, newerToken1, token2);

  // service3 is not in the file, so a fresh token is obtained for it.
  TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
  Token<?> token3 = creds.getToken(new Text(fs3.getCanonicalServiceName()));
  // fix: idiomatic null assertion instead of assertTrue(x != null).
  assertNotNull(token3);
  checkToken(creds, newerToken1, token2, token3);

  // Repeated calls must leave the credential set unchanged.
  TokenCache.obtainTokensForNamenodesInternal(fs1, creds, conf);
  TokenCache.obtainTokensForNamenodesInternal(fs2, creds, conf);
  TokenCache.obtainTokensForNamenodesInternal(fs3, creds, conf);
  checkToken(creds, newerToken1, token2, token3);
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * user1 ("alice") obtains a delegation token; only she may renew or cancel
 * it, and a second cancel must fail with InvalidToken.
 */
@SuppressWarnings("deprecation")
@Test
public void testDelegationToken() throws Exception {
  // Restored generics on PrivilegedExceptionAction/Token (raw in the
  // extracted source).
  final JobClient client;
  client = user1.doAs(new PrivilegedExceptionAction<JobClient>() {
    @Override
    public JobClient run() throws Exception {
      return new JobClient(cluster.createJobConf());
    }
  });
  final JobClient bobClient;
  bobClient = user2.doAs(new PrivilegedExceptionAction<JobClient>() {
    @Override
    public JobClient run() throws Exception {
      return new JobClient(cluster.createJobConf());
    }
  });

  final Token<DelegationTokenIdentifier> token =
      client.getDelegationToken(new Text(user1.getUserName()));

  // Decode the identifier and sanity-check owner and validity window.
  DataInputBuffer inBuf = new DataInputBuffer();
  byte[] bytes = token.getIdentifier();
  inBuf.reset(bytes, bytes.length);
  DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
  ident.readFields(inBuf);
  assertEquals("alice", ident.getUser().getUserName());
  long createTime = ident.getIssueDate();
  long maxTime = ident.getMaxDate();
  long currentTime = System.currentTimeMillis();
  System.out.println("create time: " + createTime);
  System.out.println("current time: " + currentTime);
  System.out.println("max time: " + maxTime);
  assertTrue("createTime < current", createTime < currentTime);
  assertTrue("current < maxTime", currentTime < maxTime);

  // The owner may renew (repeatedly).
  user1.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      client.renewDelegationToken(token);
      client.renewDelegationToken(token);
      return null;
    }
  });
  // A different user may neither renew...
  user2.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try {
        bobClient.renewDelegationToken(token);
        Assert.fail("bob renew");
      } catch (AccessControlException ace) {
        // expected
      }
      return null;
    }
  });
  // ...nor cancel the token.
  user2.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try {
        bobClient.cancelDelegationToken(token);
        Assert.fail("bob cancel");
      } catch (AccessControlException ace) {
        // expected
      }
      return null;
    }
  });
  // The owner cancels once; a second cancel must raise InvalidToken.
  user1.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      client.cancelDelegationToken(token);
      try {
        client.cancelDelegationToken(token);
        Assert.fail("second alice cancel");
      } catch (InvalidToken it) {
        // expected
      }
      return null;
    }
  });
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test
public void testMaxBlockLocationsOldSplits() throws Exception {
  // A split carrying more locations than MAX_BLOCK_LOCATIONS_KEY must be
  // truncated to that limit once its meta info is written and re-read.
  TEST_DIR.mkdirs();
  try {
    Configuration conf = new Configuration();
    conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    Path submitDir = new Path(TEST_DIR.getAbsolutePath());
    FileSystem fs = FileSystem.getLocal(conf);
    String[] manyLocations = {"loc1", "loc2", "loc3", "loc4", "loc5"};
    org.apache.hadoop.mapred.FileSplit split =
        new org.apache.hadoop.mapred.FileSplit(new Path("/some/path"), 0, 1,
            manyLocations);
    JobSplitWriter.createSplitFiles(submitDir, conf, fs,
        new org.apache.hadoop.mapred.InputSplit[]{split});
    JobSplit.TaskSplitMetaInfo[] infos =
        SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf, submitDir);
    assertEquals("unexpected number of splits", 1, infos.length);
    assertEquals("unexpected number of split locations", 4,
        infos[0].getLocations().length);
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * New-API variant: a {@code mapreduce.lib.input.FileSplit} carrying five
 * locations must be clamped to the configured maximum of four on read-back.
 */
@Test public void testMaxBlockLocationsNewSplits() throws Exception {
  TEST_DIR.mkdirs();
  try {
    Configuration jobConf = new Configuration();
    jobConf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
    FileSystem localFs = FileSystem.getLocal(jobConf);
    Path jobSubmitDir = new Path(TEST_DIR.getAbsolutePath());
    FileSplit newApiSplit = new FileSplit(new Path("/some/path"), 0, 1,
        new String[]{"loc1","loc2","loc3","loc4","loc5"});
    JobSplitWriter.createSplitFiles(jobSubmitDir, jobConf, localFs, new FileSplit[]{newApiSplit});
    JobSplit.TaskSplitMetaInfo[] metaInfo =
        SplitMetaInfoReader.readSplitMetaInfo(new JobID(), localFs, jobConf, jobSubmitDir);
    assertEquals("unexpected number of splits", 1, metaInfo.length);
    assertEquals("unexpected number of split locations", 4, metaInfo[0].getLocations().length);
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Shutting a Fetcher down mid-transfer must abort an on-disk map output
 * cleanly: the input stream gets closed, the temp file is created then
 * deleted, and OnDiskMapOutput.abort() is invoked.
 */
@Test(timeout=10000) public void testInterruptOnDisk() throws Exception {
final int FETCHER=7;
Path p=new Path("file:///tmp/foo");
Path pTmp=OnDiskMapOutput.getTempPath(p,FETCHER);
// Deep stubs so chained FileSystem calls return mocks instead of NPEing.
FileSystem mFs=mock(FileSystem.class,RETURNS_DEEP_STUBS);
MapOutputFile mof=mock(MapOutputFile.class);
when(mof.getInputFileForWrite(any(TaskID.class),anyLong())).thenReturn(p);
// Spy so abort() can be verified while the real implementation still runs.
OnDiskMapOutput odmo=spy(new OnDiskMapOutput(map1ID,id,mm,100L,job,mof,FETCHER,true,mFs,p));
when(mm.reserve(any(TaskAttemptID.class),anyLong(),anyInt())).thenReturn(odmo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
// Valid shuffle handshake so the fetch proceeds to reading data.
String replyHash=SecureShuffleUtils.generateHash(encHash.getBytes(),key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header=new ShuffleHeader(map1ID.toString(),10,10,1);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
// StuckInputStream blocks after the header so the fetcher hangs mid-copy.
final StuckInputStream in=new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
// disconnect() releases the stuck stream so shutdown can complete.
doAnswer(new Answer(){
public Void answer( InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}
).when(connection).disconnect();
Fetcher underTest=new FakeFetcher(job,id,ss,mm,r,metrics,except,key,connection,FETCHER);
underTest.start();
// Wait until the fetcher is actually blocked before interrupting it.
in.waitForFetcher();
underTest.shutDown();
underTest.join();
assertTrue(in.wasClosedProperly());
// Temp file lifecycle: created once, then deleted (non-recursive) on abort.
verify(mFs).create(eq(pTmp));
verify(mFs).delete(eq(pTmp),eq(false));
verify(odmo).abort();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * In-memory counterpart of testInterruptOnDisk: shutting the Fetcher down
 * mid-transfer must close the stream and call InMemoryMapOutput.abort().
 */
@Test(timeout=10000) public void testInterruptInMemory() throws Exception {
final int FETCHER=2;
// Spy so abort() can be verified while the real implementation still runs.
InMemoryMapOutput immo=spy(new InMemoryMapOutput(job,id,mm,100,null,true));
when(mm.reserve(any(TaskAttemptID.class),anyLong(),anyInt())).thenReturn(immo);
doNothing().when(mm).waitForResource();
when(ss.getHost()).thenReturn(host);
// Valid shuffle handshake so the fetch proceeds to reading data.
String replyHash=SecureShuffleUtils.generateHash(encHash.getBytes(),key);
when(connection.getResponseCode()).thenReturn(200);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
ShuffleHeader header=new ShuffleHeader(map1ID.toString(),10,10,1);
ByteArrayOutputStream bout=new ByteArrayOutputStream();
header.write(new DataOutputStream(bout));
// StuckInputStream blocks after the header so the fetcher hangs mid-copy.
final StuckInputStream in=new StuckInputStream(new ByteArrayInputStream(bout.toByteArray()));
when(connection.getInputStream()).thenReturn(in);
// disconnect() releases the stuck stream so shutdown can complete.
doAnswer(new Answer(){
public Void answer( InvocationOnMock ignore) throws IOException {
in.close();
return null;
}
}
).when(connection).disconnect();
Fetcher underTest=new FakeFetcher(job,id,ss,mm,r,metrics,except,key,connection,FETCHER);
underTest.start();
// Wait until the fetcher is actually blocked before interrupting it.
in.waitForFetcher();
underTest.shutDown();
underTest.join();
assertTrue(in.wasClosedProperly());
verify(immo).abort();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Drives the in-memory merge path twice: two committed outputs exceed the
 * merge threshold, a third reserve is told to wait, and each merge cycle is
 * counted via barriers shared with the stubbed merge thread.
 */
@Test(timeout=10000) public void testMemoryMerge() throws Exception {
final int TOTAL_MEM_BYTES=10000;
// 7950*2 = 15900 > 10000*0.9 merge threshold, so two outputs trigger a merge.
final int OUTPUT_SIZE=7950;
JobConf conf=new JobConf();
conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT,1.0f);
conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,TOTAL_MEM_BYTES);
conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,0.8f);
conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT,0.9f);
TestExceptionReporter reporter=new TestExceptionReporter();
// Two-party barriers: test thread + stubbed merge thread rendezvous here.
CyclicBarrier mergeStart=new CyclicBarrier(2);
CyclicBarrier mergeComplete=new CyclicBarrier(2);
StubbedMergeManager mgr=new StubbedMergeManager(conf,reporter,mergeStart,mergeComplete);
MapOutput out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
InMemoryMapOutput mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
MapOutput out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
InMemoryMapOutput mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
// Third reservation exceeds the memory limit: caller must stall (null).
MapOutput out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
// Committing both outputs crosses the merge threshold and starts merge #1.
mout1.commit();
mout2.commit();
mergeStart.await();
Assert.assertEquals(1,mgr.getNumMerges());
// Repeat the cycle while merge #1 is still in flight.
out1=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput));
mout1=(InMemoryMapOutput)out1;
fillOutput(mout1);
out2=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput));
mout2=(InMemoryMapOutput)out2;
fillOutput(mout2);
out3=mgr.reserve(null,OUTPUT_SIZE,0);
Assert.assertEquals("Should be told to wait",null,out3);
mout1.commit();
mout2.commit();
// Let merge #1 finish, then wait for merge #2 to start and finish.
mergeComplete.await();
mergeStart.await();
Assert.assertEquals(2,mgr.getNumMerges());
mergeComplete.await();
// No further merges, and the merge thread reported no exceptions.
Assert.assertEquals(2,mgr.getNumMerges());
Assert.assertEquals("exception reporter invoked",0,reporter.getNumExceptions());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException {
JobConf jobConf=new JobConf();
final int SORT_FACTOR=5;
jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR);
MapOutputFile mapOutputFile=new MROutputFiles();
FileSystem fs=FileSystem.getLocal(jobConf);
MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile);
MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger");
int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor");
assertEquals(mergeFactor,SORT_FACTOR);
onDiskMerger.suspend();
Random rand=new Random();
for (int i=0; i < 2 * SORT_FACTOR; ++i) {
Path path=new Path("somePath");
CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt());
manager.closeOnDiskFile(cap);
}
LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged");
assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0);
for (int i=0; i < pendingToBeMerged.size(); ++i) {
List inputs=pendingToBeMerged.get(i);
for (int j=1; j < inputs.size(); ++j) {
assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR);
assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize());
}
}
}
InternalCallVerifier EqualityVerifier
@Test public void testInMemoryAndOnDiskMerger() throws Throwable {
JobID jobId=new JobID("a",0);
TaskAttemptID reduceId1=new TaskAttemptID(new TaskID(jobId,TaskType.REDUCE,0),0);
TaskAttemptID mapId1=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,1),0);
TaskAttemptID mapId2=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,2),0);
LocalDirAllocator lda=new LocalDirAllocator(MRConfig.LOCAL_DIR);
MergeManagerImpl mergeManager=new MergeManagerImpl(reduceId1,jobConf,fs,lda,Reporter.NULL,null,null,null,null,null,null,null,new Progress(),new MROutputFiles());
Map map1=new TreeMap();
map1.put("apple","disgusting");
map1.put("carrot","delicious");
Map map2=new TreeMap();
map1.put("banana","pretty good");
byte[] mapOutputBytes1=writeMapOutput(conf,map1);
byte[] mapOutputBytes2=writeMapOutput(conf,map2);
InMemoryMapOutput mapOutput1=new InMemoryMapOutput(conf,mapId1,mergeManager,mapOutputBytes1.length,null,true);
InMemoryMapOutput mapOutput2=new InMemoryMapOutput(conf,mapId2,mergeManager,mapOutputBytes2.length,null,true);
System.arraycopy(mapOutputBytes1,0,mapOutput1.getMemory(),0,mapOutputBytes1.length);
System.arraycopy(mapOutputBytes2,0,mapOutput2.getMemory(),0,mapOutputBytes2.length);
MergeThread,Text,Text> inMemoryMerger=mergeManager.createInMemoryMerger();
List> mapOutputs1=new ArrayList>();
mapOutputs1.add(mapOutput1);
mapOutputs1.add(mapOutput2);
inMemoryMerger.merge(mapOutputs1);
Assert.assertEquals(1,mergeManager.onDiskMapOutputs.size());
TaskAttemptID reduceId2=new TaskAttemptID(new TaskID(jobId,TaskType.REDUCE,3),0);
TaskAttemptID mapId3=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,4),0);
TaskAttemptID mapId4=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,5),0);
Map map3=new TreeMap();
map3.put("apple","awesome");
map3.put("carrot","amazing");
Map map4=new TreeMap();
map4.put("banana","bla");
byte[] mapOutputBytes3=writeMapOutput(conf,map3);
byte[] mapOutputBytes4=writeMapOutput(conf,map4);
InMemoryMapOutput mapOutput3=new InMemoryMapOutput(conf,mapId3,mergeManager,mapOutputBytes3.length,null,true);
InMemoryMapOutput mapOutput4=new InMemoryMapOutput(conf,mapId4,mergeManager,mapOutputBytes4.length,null,true);
System.arraycopy(mapOutputBytes3,0,mapOutput3.getMemory(),0,mapOutputBytes3.length);
System.arraycopy(mapOutputBytes4,0,mapOutput4.getMemory(),0,mapOutputBytes4.length);
MergeThread,Text,Text> inMemoryMerger2=mergeManager.createInMemoryMerger();
List> mapOutputs2=new ArrayList>();
mapOutputs2.add(mapOutput3);
mapOutputs2.add(mapOutput4);
inMemoryMerger2.merge(mapOutputs2);
Assert.assertEquals(2,mergeManager.onDiskMapOutputs.size());
List paths=new ArrayList();
Iterator iterator=mergeManager.onDiskMapOutputs.iterator();
List keys=new ArrayList();
List values=new ArrayList();
while (iterator.hasNext()) {
CompressAwarePath next=iterator.next();
readOnDiskMapOutput(conf,fs,next,keys,values);
paths.add(next);
}
Assert.assertEquals(keys,Arrays.asList("apple","banana","carrot","apple","banana","carrot"));
Assert.assertEquals(values,Arrays.asList("awesome","bla","amazing","disgusting","pretty good","delicious"));
mergeManager.close();
mergeManager=new MergeManagerImpl(reduceId2,jobConf,fs,lda,Reporter.NULL,null,null,null,null,null,null,null,new Progress(),new MROutputFiles());
MergeThread onDiskMerger=mergeManager.createOnDiskMerger();
onDiskMerger.merge(paths);
Assert.assertEquals(1,mergeManager.onDiskMapOutputs.size());
keys=new ArrayList();
values=new ArrayList();
readOnDiskMapOutput(conf,fs,mergeManager.onDiskMapOutputs.iterator().next(),keys,values);
Assert.assertEquals(keys,Arrays.asList("apple","apple","banana","banana","carrot","carrot"));
Assert.assertEquals(values,Arrays.asList("awesome","disgusting","pretty good","bla","amazing","delicious"));
mergeManager.close();
Assert.assertEquals(0,mergeManager.inMemoryMapOutputs.size());
Assert.assertEquals(0,mergeManager.inMemoryMergedMapOutputs.size());
Assert.assertEquals(0,mergeManager.onDiskMapOutputs.size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A failed TIP must count toward scheduler progress: with two maps, one
 * tipFailed() gives 0.5 progress and the scheduler is not done; failing the
 * second brings progress to 1.0 and waitUntilDone() returns true.
 */
@SuppressWarnings("rawtypes") @Test public void testTipFailed() throws Exception {
JobConf job=new JobConf();
job.setNumMapTasks(2);
// Minimal reduce-side TaskStatus stub; fetch-failure reporting is a no-op.
TaskStatus status=new TaskStatus(){
@Override public boolean getIsMap(){
return false;
}
@Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
}
}
;
Progress progress=new Progress();
TaskAttemptID reduceId=new TaskAttemptID("314159",0,TaskType.REDUCE,0,0);
ShuffleSchedulerImpl scheduler=new ShuffleSchedulerImpl(job,status,reduceId,null,progress,null,null,null);
JobID jobId=new JobID();
TaskID taskId1=new TaskID(jobId,TaskType.REDUCE,1);
// First of two tasks fails: half done, but not finished yet.
scheduler.tipFailed(taskId1);
Assert.assertEquals("Progress should be 0.5",0.5f,progress.getProgress(),0.0f);
Assert.assertFalse(scheduler.waitUntilDone(1));
TaskID taskId0=new TaskID(jobId,TaskType.REDUCE,0);
// Second failure completes the scheduler's bookkeeping.
scheduler.tipFailed(taskId0);
Assert.assertEquals("Progress should be 1.0",1.0f,progress.getProgress(),0.0f);
Assert.assertTrue(scheduler.waitUntilDone(1));
}
InternalCallVerifier EqualityVerifier
/**
 * "-list-attempt-ids" must accept MAP/REDUCE (case-insensitively) and the
 * running/completed states, exiting 0 and fetching task reports accordingly.
 */
@Test public void testListAttemptIdsWithValidInput() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job mockJob = mock(Job.class);
  CLI jobCli = spy(new CLI());
  doReturn(mockCluster).when(jobCli).createCluster();
  when(mockJob.getTaskReports(TaskType.MAP)).thenReturn(getTaskReports(jobId, TaskType.MAP));
  when(mockJob.getTaskReports(TaskType.REDUCE)).thenReturn(getTaskReports(jobId, TaskType.REDUCE));
  when(mockCluster.getJob(jobId)).thenReturn(mockJob);
  int retCode_MAP = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "MAP", "running"});
  assertEquals("MAP is a valid input,exit code should be 0", 0, retCode_MAP);
  // Lower-case task type must be accepted too.
  int retCode_map = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "map", "running"});
  assertEquals("map is a valid input,exit code should be 0", 0, retCode_map);
  int retCode_REDUCE = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "REDUCE", "running"});
  assertEquals("REDUCE is a valid input,exit code should be 0", 0, retCode_REDUCE);
  int retCode_completed = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "REDUCE", "completed"});
  assertEquals("REDUCE and completed are a valid inputs to -list-attempt-ids,exit code should be 0", 0, retCode_completed);
  // MAP+map hit the map reports twice; REDUCE running+completed likewise.
  verify(mockJob, times(2)).getTaskReports(TaskType.MAP);
  verify(mockJob, times(2)).getTaskReports(TaskType.REDUCE);
}
InternalCallVerifier EqualityVerifier
/**
 * "-list-attempt-ids" must reject unsupported task types (JOB_SETUP,
 * JOB_CLEANUP) and unknown task states, exiting -1 in each case.
 */
@Test public void testListAttemptIdsWithInvalidInputs() throws Exception {
  JobID jobId = JobID.forName(jobIdStr);
  Cluster mockCluster = mock(Cluster.class);
  Job mockJob = mock(Job.class);
  CLI jobCli = spy(new CLI());
  doReturn(mockCluster).when(jobCli).createCluster();
  when(mockCluster.getJob(jobId)).thenReturn(mockJob);
  int retCode_JOB_SETUP = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "JOB_SETUP", "running"});
  assertEquals("JOB_SETUP is an invalid input,exit code should be -1", -1, retCode_JOB_SETUP);
  int retCode_JOB_CLEANUP = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "JOB_CLEANUP", "running"});
  assertEquals("JOB_CLEANUP is an invalid input,exit code should be -1", -1, retCode_JOB_CLEANUP);
  // "complete" is not a recognized state ("completed" is).
  int retCode_invalidTaskState = jobCli.run(new String[]{"-list-attempt-ids", jobIdStr, "REDUCE", "complete"});
  assertEquals("complete is an invalid input,exit code should be -1", -1, retCode_invalidTaskState);
}
InternalCallVerifier EqualityVerifier
/**
 * relativeToWorking() must map the working directory to ".", children to
 * their relative names, the parent to "..", and siblings via "../".
 */
@Test public void testRelativeToWorking(){
  final String cwd = System.getProperty("user.dir", ".");
  // The working directory itself collapses to ".".
  assertEquals(".", relativeToWorking(cwd));
  Path cwdPath = new Path(cwd);
  Path child = new Path(cwdPath, "foo");
  assertEquals("foo", relativeToWorking(child.toUri().getPath()));
  Path grandChild = new Path(child, "bar");
  assertEquals("foo/bar", relativeToWorking(grandChild.toUri().getPath()));
  Path parentDir = new Path(cwdPath, "..");
  assertEquals("..", relativeToWorking(parentDir.toUri().getPath()));
  Path sibling = new Path(parentDir, "baz");
  assertEquals("../baz", relativeToWorking(sibling.toUri().getPath()));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Creates directories under two managed volumes and verifies that
 * MRAsyncDiskService deletes them via the relative and absolute APIs,
 * refuses paths outside its volumes, and leaves everything cleaned up.
 */
@Test public void testMRAsyncDiskService() throws Throwable {
  FileSystem localFileSystem = FileSystem.getLocal(new Configuration());
  String[] vols = new String[]{TEST_ROOT_DIR + "/0", TEST_ROOT_DIR + "/1"};
  MRAsyncDiskService service = new MRAsyncDiskService(localFileSystem, vols);
  String a = "a";
  String b = "b";
  String c = "b/c";
  String d = "d";
  File dirA = new File(vols[0], a);
  File dirB = new File(vols[1], b);
  File dirC = new File(vols[1], c);
  File dirD = new File(vols[1], d);
  // Create all test directories and confirm they exist.
  for (File dir : new File[]{dirA, dirB, dirC, dirD}) {
    dir.mkdirs();
  }
  assertTrue(dirA.exists());
  assertTrue(dirB.exists());
  assertTrue(dirC.exists());
  assertTrue(dirD.exists());
  // Relative deletion removes exactly the named directory…
  service.moveAndDeleteRelativePath(vols[0], a);
  assertFalse(dirA.exists());
  // …and takes nested children ("b/c") along with the parent ("b").
  service.moveAndDeleteRelativePath(vols[1], b);
  assertFalse(dirB.exists());
  assertFalse(dirC.exists());
  // A missing relative path reports false rather than throwing.
  assertFalse(service.moveAndDeleteRelativePath(vols[1], "not_exists"));
  // Absolute paths outside every managed volume must be rejected.
  IOException ee = null;
  try {
    service.moveAndDeleteAbsolutePath(TEST_ROOT_DIR + "/2");
  } catch (IOException e) {
    ee = e;
  }
  assertNotNull("asyncDiskService should not be able to delete files " + "outside all volumes", ee);
  // Absolute deletion inside a volume succeeds.
  assertTrue(service.moveAndDeleteAbsolutePath(vols[1] + Path.SEPARATOR_CHAR + d));
  makeSureCleanedUp(vols, service);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * To ensure nothing broken after we removed normalization
 * from the MRAM side: a job requesting non-normalized memory sizes must
 * still run to successful completion on the mini cluster.
 * @throws Exception
 */
@Test public void testJobWithNonNormalizedCapabilities() throws Exception {
// Skip when the MR app jar was not built; the mini cluster needs it.
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
JobConf jobConf=new JobConf(mrCluster.getConfig());
// NOTE(review): the map side uses the new-style key and the reduce side the
// deprecated "mapred." key — presumably deliberate to cover the deprecated
// path as well; confirm before normalizing.
jobConf.setInt("mapreduce.map.memory.mb",700);
jobConf.setInt("mapred.reduce.memory.mb",1500);
SleepJob sleepJob=new SleepJob();
sleepJob.setConf(jobConf);
Job job=sleepJob.createJob(3,2,1000,1,500,1);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR);
job.submit();
boolean completed=job.waitForCompletion(true);
Assert.assertTrue("Job should be completed",completed);
Assert.assertEquals("Job should be finished successfully",JobStatus.State.SUCCEEDED,job.getJobState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a small sleep job on the mini cluster and checks success state,
 * tracking-URL shape, sleep-job counters, and reported task progress.
 */
@Test(timeout=300000) public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testSleepJob().");
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  Configuration sleepConf = new Configuration(mrCluster.getConfig());
  sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(sleepConf);
  int reduceCount = sleepConf.getInt("TestMRJobs.testSleepJob.reduces", 2);
  Job sleepingJob = sleepJob.createJob(3, reduceCount, 10000, 1, 5000, 1);
  sleepingJob.addFileToClassPath(APP_JAR);
  sleepingJob.setJarByClass(SleepJob.class);
  sleepingJob.setMaxMapAttempts(1);
  sleepingJob.submit();
  String trackingUrl = sleepingJob.getTrackingURL();
  String jobId = sleepingJob.getJobID().toString();
  boolean succeeded = sleepingJob.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, sleepingJob.getJobState());
  // The tracking URL should end with the job's numeric suffix plus "/".
  Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,
      trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  verifySleepJobCounters(sleepingJob);
  verifyTaskProgress(sleepingJob);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs RandomTextWriterJob on the mini cluster, then checks success state,
 * tracking-URL shape, the number of part files in the output directory, and
 * the job's counters.
 *
 * Fix: the original declared {@code RemoteIterator} raw, so
 * {@code iterator.next()} returned Object and could not be assigned to
 * FileStatus — the type argument (stripped in extraction) is restored here.
 */
@Test(timeout=60000) public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testRandomWriter().");
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  RandomTextWriterJob randomWriterJob = new RandomTextWriterJob();
  // 3072 total bytes / 1024 bytes per map => 3 map tasks => 3 part files.
  mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
  mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
  Job job = randomWriterJob.createJob(mrCluster.getConfig());
  Path outputDir = new Path(OUTPUT_ROOT_DIR, "random-output");
  FileOutputFormat.setOutputPath(job, outputDir);
  job.setSpeculativeExecution(false);
  job.addFileToClassPath(APP_JAR);
  job.setJarByClass(RandomTextWriterJob.class);
  job.setMaxMapAttempts(1);
  job.submit();
  String trackingUrl = job.getTrackingURL();
  String jobId = job.getJobID().toString();
  boolean succeeded = job.waitForCompletion(true);
  Assert.assertTrue(succeeded);
  Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
  Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,
      trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));
  // Count output part files, ignoring the _SUCCESS marker.
  RemoteIterator<FileStatus> iterator =
      FileContext.getFileContext(mrCluster.getConfig()).listStatus(outputDir);
  int count = 0;
  while (iterator.hasNext()) {
    FileStatus file = iterator.next();
    if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
      count++;
    }
  }
  Assert.assertEquals("Number of part files is wrong!", 3, count);
  verifyRandomWriterCounters(job);
}
IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Runs a sleep job with small rolling-log limits for both the AM and the map
 * task, waits for the app to reach a terminal RM state, then scans every
 * NodeManager log directory and checks each container produced the expected
 * number of rotated syslog files of at least the configured size.
 *
 * Fix: the file-count assertions used Assert.assertSame, which compares
 * autoboxed Integers by reference and only passes by accident for values in
 * the Integer cache; assertEquals is the correct numeric comparison.
 */
@Test(timeout=120000) public void testContainerRollingLog() throws IOException, InterruptedException, ClassNotFoundException {
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  final SleepJob sleepJob = new SleepJob();
  final JobConf sleepConf = new JobConf(mrCluster.getConfig());
  // Verbose logging plus tiny log limits force log rotation quickly.
  sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
  final long userLogKb = 4;
  sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
  sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
  sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
  final long amLogKb = 7;
  sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
  sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
  sleepJob.setConf(sleepConf);
  final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
  job.setJarByClass(SleepJob.class);
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  final JobId jobId = TypeConverter.toYarn(job.getJobID());
  final ApplicationId appID = jobId.getAppId();
  // Poll (up to 60s) until the RM reports a terminal application state.
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;
    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED,
      mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
  // Glob for each container's syslog under every NM log dir.
  final String appIdStr = appID.toString();
  final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
  final String containerGlob = "container_" + appIdSuffix + "_*_*";
  final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob+ Path.SEPARATOR+ TaskLog.LogName.SYSLOG;
  int numAppMasters = 0;
  int numMapTasks = 0;
  for (int i = 0; i < NUM_NODE_MGRS; i++) {
    final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
    for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
      final Path absSyslogGlob = new Path(logDir + Path.SEPARATOR + syslogGlob);
      LOG.info("Checking for glob: " + absSyslogGlob);
      final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
      for (FileStatus slog : syslogs) {
        // In uber mode the AM log contains the task output too.
        boolean foundAppMaster = job.isUber();
        final Path containerPathComponent = slog.getPath().getParent();
        if (!foundAppMaster) {
          // Container id 1 of the application is the AM container.
          final ContainerId cid = ConverterUtils.toContainerId(containerPathComponent.getName());
          foundAppMaster = (cid.getId() == 1);
        }
        final FileStatus[] sysSiblings =
            localFs.globStatus(new Path(containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
        Arrays.sort(sysSiblings);
        if (foundAppMaster) {
          numAppMasters++;
        }
        else {
          numMapTasks++;
        }
        if (foundAppMaster) {
          // backups + 1 active file; syslog.1 must have hit the size limit.
          Assert.assertEquals("Unexpected number of AM sylog* files",
              sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1, sysSiblings.length);
          Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,
              sysSiblings[1].getLen() >= amLogKb * 1024);
        }
        else {
          Assert.assertEquals("Unexpected number of MR task sylog* files",
              sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1, sysSiblings.length);
          Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,
              sysSiblings[1].getLen() >= userLogKb * 1024);
        }
      }
    }
  }
  // Exactly one AM; in uber mode its log subsumes the single map task's.
  Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
  if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
    Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
  }
  else {
    Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Runs the failing-mapper job, dumps diagnostics for both map attempts, and
 * checks the completion events (FAILED then TIPFAILED), the final FAILED job
 * state, and the failure counters.
 */
@Test(timeout=60000) public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testFailingMapper().");
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  Job job = runFailingMapperJob();
  TaskID taskID = new TaskID(job.getJobID(), TaskType.MAP, 0);
  // Dump diagnostics for attempt 0 and attempt 1 of the failing map task.
  for (int attempt = 0; attempt < 2; attempt++) {
    TaskAttemptID aId = new TaskAttemptID(taskID, attempt);
    System.out.println("Diagnostics for " + aId + " :");
    for (String diag : job.getTaskDiagnostics(aId)) {
      System.out.println(diag);
    }
  }
  TaskCompletionEvent[] events = job.getTaskCompletionEvents(0, 2);
  Assert.assertEquals(TaskCompletionEvent.Status.FAILED, events[0].getStatus());
  Assert.assertEquals(TaskCompletionEvent.Status.TIPFAILED, events[1].getStatus());
  Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());
  verifyFailingMapperCounters(job);
}
InternalCallVerifier EqualityVerifier
/**
 * Runs a sleep job, waits for the app to reach a terminal RM state, and then
 * checks that counters fetched afterwards match the live counters and that
 * the history server returns a valid job report for the job.
 */
@Test(timeout=90000) public void testJobHistoryData() throws IOException, InterruptedException, AvroRemoteException, ClassNotFoundException {
// Skip when the MR app jar was not built; the mini cluster needs it.
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
SleepJob sleepJob=new SleepJob();
sleepJob.setConf(mrCluster.getConfig());
Job job=sleepJob.createJob(3,2,1000,1,500,1);
job.setJarByClass(SleepJob.class);
job.addFileToClassPath(APP_JAR);
job.waitForCompletion(true);
// Counters taken right after completion (while the AM data is fresh).
Counters counterMR=job.getCounters();
JobId jobId=TypeConverter.toYarn(job.getJobID());
ApplicationId appID=jobId.getAppId();
// Poll (up to 60s) until the RM reports a terminal application state.
int pollElapsed=0;
while (true) {
Thread.sleep(1000);
pollElapsed+=1000;
if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
break;
}
if (pollElapsed >= 60000) {
LOG.warn("application did not reach terminal state within 60 seconds");
break;
}
}
Assert.assertEquals(RMAppState.FINISHED,mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());
// Counters re-fetched after the app finished — presumably now served from
// the history path; they must equal the live ones. TODO confirm source.
Counters counterHS=job.getCounters();
LOG.info("CounterHS " + counterHS);
LOG.info("CounterMR " + counterMR);
Assert.assertEquals(counterHS,counterMR);
// The history server must also return a consistent job report.
HSClientProtocol historyClient=instantiateHistoryProxy();
GetJobReportRequest gjReq=Records.newRecord(GetJobReportRequest.class);
gjReq.setJobId(jobId);
JobReport jobReport=historyClient.getJobReport(gjReq).getJobReport();
verifyJobReport(jobReport,jobId);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A successful job must leave the committer's setup and commit marker files
 * in the output directory and no abort markers.
 */
@Test public void testJobSucceed() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobSucceed().");
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  JobConf conf = new JobConf(mrCluster.getConfig());
  Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "in");
  Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "out");
  runJobSucceed(conf, in, out);
  FileSystem fs = FileSystem.get(conf);
  // Setup and commit markers must exist…
  for (String marker : new String[]{
      CustomOutputCommitter.JOB_SETUP_FILE_NAME,
      CustomOutputCommitter.JOB_COMMIT_FILE_NAME,
      CustomOutputCommitter.TASK_SETUP_FILE_NAME,
      CustomOutputCommitter.TASK_COMMIT_FILE_NAME}) {
    Assert.assertTrue(fs.exists(new Path(out, marker)));
  }
  // …while abort markers must not.
  for (String marker : new String[]{
      CustomOutputCommitter.JOB_ABORT_FILE_NAME,
      CustomOutputCommitter.TASK_ABORT_FILE_NAME}) {
    Assert.assertFalse(fs.exists(new Path(out, marker)));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A failed job must leave the committer's setup and abort marker files in
 * the output directory and no commit markers.
 */
@Test public void testJobFail() throws IOException, InterruptedException, ClassNotFoundException {
  LOG.info("\n\n\nStarting testJobFail().");
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  JobConf conf = new JobConf(mrCluster.getConfig());
  Path in = new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "fail-in");
  Path out = new Path(mrCluster.getTestWorkDir().getAbsolutePath(), "fail-out");
  runJobFail(conf, in, out);
  FileSystem fs = FileSystem.get(conf);
  // Setup and abort markers must exist…
  for (String marker : new String[]{
      CustomOutputCommitter.JOB_SETUP_FILE_NAME,
      CustomOutputCommitter.JOB_ABORT_FILE_NAME,
      CustomOutputCommitter.TASK_SETUP_FILE_NAME,
      CustomOutputCommitter.TASK_ABORT_FILE_NAME}) {
    Assert.assertTrue(fs.exists(new Path(out, marker)));
  }
  // …while commit markers must not.
  for (String marker : new String[]{
      CustomOutputCommitter.JOB_COMMIT_FILE_NAME,
      CustomOutputCommitter.TASK_COMMIT_FILE_NAME}) {
    Assert.assertFalse(fs.exists(new Path(out, marker)));
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Queries RMNMInfo against the live mini cluster: every reported node must
 * be RUNNING, expose the expected JSON fields, and show zero containers and
 * zero used memory.
 *
 * Fix: the original declared the iterator raw ({@code Iterator it}), so
 * {@code it.next()} returned Object and could not be assigned to JsonNode —
 * the type argument (stripped in extraction) is restored here.
 */
@Test public void testRMNMInfo() throws Exception {
  // Skip when the MR app jar was not built; the mini cluster needs it.
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
    return;
  }
  RMContext rmc = mrCluster.getResourceManager().getRMContext();
  ResourceScheduler rms = mrCluster.getResourceManager().getResourceScheduler();
  RMNMInfo rmInfo = new RMNMInfo(rmc, rms);
  String liveNMs = rmInfo.getLiveNodeManagers();
  ObjectMapper mapper = new ObjectMapper();
  JsonNode jn = mapper.readTree(liveNMs);
  Assert.assertEquals("Unexpected number of live nodes:", NUMNODEMANAGERS, jn.size());
  Iterator<JsonNode> it = jn.iterator();
  while (it.hasNext()) {
    JsonNode n = it.next();
    Assert.assertNotNull(n.get("HostName"));
    Assert.assertNotNull(n.get("Rack"));
    Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
        n.get("State").asText().contains("RUNNING"));
    Assert.assertNotNull(n.get("NodeHTTPAddress"));
    Assert.assertNotNull(n.get("LastHealthUpdate"));
    Assert.assertNotNull(n.get("HealthReport"));
    Assert.assertNotNull(n.get("NodeManagerVersion"));
    Assert.assertNotNull(n.get("NumContainers"));
    // An idle cluster must report no usage on any node.
    Assert.assertEquals(n.get("NodeId") + ": Unexpected number of used containers",
        0, n.get("NumContainers").asInt());
    Assert.assertEquals(n.get("NodeId") + ": Unexpected amount of used memory",
        0, n.get("UsedMemoryMB").asInt());
    Assert.assertNotNull(n.get("AvailableMemoryMB"));
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * RMNMInfo with an RM node the scheduler does not know about: the static
 * node fields are still reported, but scheduler-derived fields
 * (NumContainers, UsedMemoryMB, AvailableMemoryMB) must be absent.
 *
 * Fix: the original declared the iterator raw ({@code Iterator it}), so
 * {@code it.next()} returned Object and could not be assigned to JsonNode —
 * the type argument (stripped in extraction) is restored here.
 */
@Test public void testRMNMInfoMissmatch() throws Exception {
  RMContext rmc = mock(RMContext.class);
  ResourceScheduler rms = mock(ResourceScheduler.class);
  // Raw map kept as in the original; the RM context mock returns it as-is.
  ConcurrentMap map = new ConcurrentHashMap();
  RMNode node = MockNodes.newNodeInfo(1, MockNodes.newResource(4 * 1024));
  map.put(node.getNodeID(), node);
  when(rmc.getRMNodes()).thenReturn(map);
  RMNMInfo rmInfo = new RMNMInfo(rmc, rms);
  String liveNMs = rmInfo.getLiveNodeManagers();
  ObjectMapper mapper = new ObjectMapper();
  JsonNode jn = mapper.readTree(liveNMs);
  Assert.assertEquals("Unexpected number of live nodes:", 1, jn.size());
  Iterator<JsonNode> it = jn.iterator();
  while (it.hasNext()) {
    JsonNode n = it.next();
    Assert.assertNotNull(n.get("HostName"));
    Assert.assertNotNull(n.get("Rack"));
    Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
        n.get("State").asText().contains("RUNNING"));
    Assert.assertNotNull(n.get("NodeHTTPAddress"));
    Assert.assertNotNull(n.get("LastHealthUpdate"));
    Assert.assertNotNull(n.get("HealthReport"));
    Assert.assertNotNull(n.get("NodeManagerVersion"));
    // Scheduler knows nothing about this node, so usage fields are null.
    Assert.assertNull(n.get("NumContainers"));
    Assert.assertNull(n.get("UsedMemoryMB"));
    Assert.assertNull(n.get("AvailableMemoryMB"));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * The protobuf record factory must return the PB-backed implementation
 * class for each MR API record interface it is asked to instantiate.
 */
@Test public void testPbRecordFactory(){
RecordFactory pbRecordFactory=RecordFactoryPBImpl.get();
try {
CounterGroup response=pbRecordFactory.newRecordInstance(CounterGroup.class);
Assert.assertEquals(CounterGroupPBImpl.class,response.getClass());
}
catch ( YarnRuntimeException e) {
e.printStackTrace();
// Message typo fixed: was "Failed to crete record".
Assert.fail("Failed to create record");
}
try {
GetCountersRequest response=pbRecordFactory.newRecordInstance(GetCountersRequest.class);
Assert.assertEquals(GetCountersRequestPBImpl.class,response.getClass());
}
catch ( YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create record");
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end speculative execution: the same job is run three times —
 * speculation off, map speculation on, reduce speculation on — and the
 * launched-task counters are checked for the extra speculative attempt
 * in each speculating run.
 */
@Test public void testSpeculativeExecution() throws Exception {
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
// Run 1: no speculation — exactly 2 maps and 2 reduces launch.
Job nonSpecJob=runSpecTest(false,false);
boolean finished=nonSpecJob.waitForCompletion(true);
Assert.assertTrue(finished);
Assert.assertEquals(JobStatus.State.SUCCEEDED,nonSpecJob.getJobState());
Counters counterSet=nonSpecJob.getCounters();
Assert.assertEquals(2,counterSet.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());
Assert.assertEquals(2,counterSet.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
Assert.assertEquals(0,counterSet.findCounter(JobCounter.NUM_FAILED_MAPS).getValue());
// Run 2: map speculation — one extra map launches and is later killed.
Job mapSpecJob=runSpecTest(true,false);
finished=mapSpecJob.waitForCompletion(true);
Assert.assertTrue(finished);
Assert.assertEquals(JobStatus.State.SUCCEEDED,mapSpecJob.getJobState());
counterSet=mapSpecJob.getCounters();
Assert.assertEquals(3,counterSet.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());
Assert.assertEquals(2,counterSet.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
Assert.assertEquals(0,counterSet.findCounter(JobCounter.NUM_FAILED_MAPS).getValue());
Assert.assertEquals(1,counterSet.findCounter(JobCounter.NUM_KILLED_MAPS).getValue());
// Run 3: reduce speculation — one extra reduce launches.
Job reduceSpecJob=runSpecTest(false,true);
finished=reduceSpecJob.waitForCompletion(true);
Assert.assertTrue(finished);
Assert.assertEquals(JobStatus.State.SUCCEEDED,reduceSpecJob.getJobState());
counterSet=reduceSpecJob.getCounters();
Assert.assertEquals(2,counterSet.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue());
Assert.assertEquals(3,counterSet.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue());
}
InternalCallVerifier EqualityVerifier
/**
 * Speculation driven by progress updates: all attempts report 50%, then
 * every task but one is finished while the last reports slower progress
 * (0.75 after more simulated time). Advancing the controlled clock should
 * make the speculator launch a second attempt for the straggler, after
 * which the first attempt is made to win and the speculation message is
 * verified. (Method-name typo "Sepculate" is kept — renaming would break
 * external references to this test.)
 */
@Test public void testSepculateSuccessfulWithUpdateEvents() throws Exception {
Clock actualClock=new SystemClock();
// ControlledClock lets the test fast-forward time to trigger speculation.
final ControlledClock clock=new ControlledClock(actualClock);
clock.setTime(System.currentTimeMillis());
MRApp app=new MRApp(NUM_MAPPERS,NUM_REDUCERS,false,"test",true,clock);
Job job=app.submit(new Configuration(),true,true);
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",NUM_MAPPERS + NUM_REDUCERS,tasks.size());
Iterator taskIter=tasks.values().iterator();
while (taskIter.hasNext()) {
app.waitForState(taskIter.next(),TaskState.RUNNING);
}
// Report identical 50% progress for every attempt.
clock.setTime(System.currentTimeMillis() + 1000);
EventHandler appEventHandler=app.getContext().getEventHandler();
for ( Map.Entry mapTask : tasks.entrySet()) {
for ( Map.Entry taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.5,TaskAttemptState.RUNNING);
TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status);
appEventHandler.handle(event);
}
}
// Finish all tasks except the last, which becomes the straggler.
Task speculatedTask=null;
int numTasksToFinish=NUM_MAPPERS + NUM_REDUCERS - 1;
clock.setTime(System.currentTimeMillis() + 1000);
for ( Map.Entry task : tasks.entrySet()) {
for ( Map.Entry taskAttempt : task.getValue().getAttempts().entrySet()) {
if (numTasksToFinish > 0) {
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_CONTAINER_CLEANED));
numTasksToFinish--;
app.waitForState(taskAttempt.getValue(),TaskAttemptState.SUCCEEDED);
}
else {
// The remaining task only reports 75% — the speculation candidate.
TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.75,TaskAttemptState.RUNNING);
speculatedTask=task.getValue();
TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status);
appEventHandler.handle(event);
}
}
}
// Jump time forward and refresh progress for any still-running attempts.
clock.setTime(System.currentTimeMillis() + 15000);
for ( Map.Entry task : tasks.entrySet()) {
for ( Map.Entry taskAttempt : task.getValue().getAttempts().entrySet()) {
if (taskAttempt.getValue().getState() != TaskAttemptState.SUCCEEDED) {
TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.75,TaskAttemptState.RUNNING);
TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status);
appEventHandler.handle(event);
}
}
}
final Task speculatedTaskConst=speculatedTask;
// Keep nudging the clock until the speculator adds a second attempt.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
if (speculatedTaskConst.getAttempts().size() != 2) {
clock.setTime(System.currentTimeMillis() + 1000);
return false;
}
else {
return true;
}
}
}
,1000,60000);
TaskAttempt[] ta=makeFirstAttemptWin(appEventHandler,speculatedTask);
verifySpeculationMessage(app,ta);
app.waitForState(Service.STATE.STOPPED);
}
InternalCallVerifier EqualityVerifier
/**
 * Speculation without further progress updates: every attempt reports 80%
 * once, then all tasks except one randomly chosen victim are completed.
 * The speculator should eventually launch a second attempt for the
 * stalled task; the first attempt is then made to win and the speculation
 * message is verified.
 */
@Test public void testSpeculateSuccessfulWithoutUpdateEvents() throws Exception {
Clock actualClock=new SystemClock();
// ControlledClock lets the test fast-forward time to trigger speculation.
final ControlledClock clock=new ControlledClock(actualClock);
clock.setTime(System.currentTimeMillis());
MRApp app=new MRApp(NUM_MAPPERS,NUM_REDUCERS,false,"test",true,clock);
Job job=app.submit(new Configuration(),true,true);
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",NUM_MAPPERS + NUM_REDUCERS,tasks.size());
Iterator taskIter=tasks.values().iterator();
while (taskIter.hasNext()) {
app.waitForState(taskIter.next(),TaskState.RUNNING);
}
// Report identical 80% progress for every attempt, once.
clock.setTime(System.currentTimeMillis() + 2000);
EventHandler appEventHandler=app.getContext().getEventHandler();
for ( Map.Entry mapTask : tasks.entrySet()) {
for ( Map.Entry taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.8,TaskAttemptState.RUNNING);
TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status);
appEventHandler.handle(event);
}
}
// Pick one task at random to stall; finish every other task.
Random generator=new Random();
Object[] taskValues=tasks.values().toArray();
final Task taskToBeSpeculated=(Task)taskValues[generator.nextInt(taskValues.length)];
for ( Map.Entry mapTask : tasks.entrySet()) {
for ( Map.Entry taskAttempt : mapTask.getValue().getAttempts().entrySet()) {
// NOTE(review): identity (!=) comparison of TaskId objects — presumably
// the job hands back the same TaskId instances it keys tasks by; confirm.
if (mapTask.getKey() != taskToBeSpeculated.getID()) {
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_DONE));
appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_CONTAINER_CLEANED));
app.waitForState(taskAttempt.getValue(),TaskAttemptState.SUCCEEDED);
}
}
}
// Keep nudging the clock until the speculator adds a second attempt.
GenericTestUtils.waitFor(new Supplier(){
@Override public Boolean get(){
if (taskToBeSpeculated.getAttempts().size() != 2) {
clock.setTime(System.currentTimeMillis() + 1000);
return false;
}
else {
return true;
}
}
}
,1000,60000);
TaskAttempt[] ta=makeFirstAttemptWin(appEventHandler,taskToBeSpeculated);
verifySpeculationMessage(app,ta);
app.waitForState(Service.STATE.STOPPED);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Uberized failing-mapper test: runs a job whose map fails and verifies
 * that only a single attempt exists (probing attempt 1 throws), that
 * exactly one completion event with FAILED/TIPFAILED status is reported,
 * and that the job ends in FAILED state.
 */
@Override @Test public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException {
LOG.info("\n\n\nStarting uberized testFailingMapper().");
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
return;
}
Job failingJob=runFailingMapperJob();
TaskID mapTaskId=new TaskID(failingJob.getJobID(),TaskType.MAP,0);
TaskAttemptID attemptId=new TaskAttemptID(mapTaskId,0);
System.out.println("Diagnostics for " + attemptId + " :");
for ( String diag : failingJob.getTaskDiagnostics(attemptId)) {
System.out.println(diag);
}
// In uber mode there is no second attempt; probing for it must throw.
boolean foundSecondAttempt=true;
try {
attemptId=new TaskAttemptID(mapTaskId,1);
System.out.println("Diagnostics for " + attemptId + " :");
for ( String diag : failingJob.getTaskDiagnostics(attemptId)) {
System.out.println(diag);
}
}
catch ( Exception e) {
foundSecondAttempt=false;
}
Assert.assertFalse(foundSecondAttempt);
TaskCompletionEvent[] completionEvents=failingJob.getTaskCompletionEvents(0,2);
Assert.assertEquals(1,completionEvents.length);
TaskCompletionEvent.Status eventStatus=completionEvents[0].getStatus();
Assert.assertTrue(eventStatus == TaskCompletionEvent.Status.FAILED || eventStatus == TaskCompletionEvent.Status.TIPFAILED);
Assert.assertEquals(JobStatus.State.FAILED,failingJob.getJobState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises equals/compareTo/hashCode/toString of TaskAttemptId.
 * Ordering is by job, then task type (MAP before REDUCE), then task
 * number, then attempt number.
 */
@Test public void testTaskAttemptId(){
long stampOne=1315890136000l;
long stampTwo=1315890136001l;
TaskAttemptId base=createTaskAttemptId(stampOne,2,2,TaskType.MAP,2);
TaskAttemptId reduceTwin=createTaskAttemptId(stampOne,2,2,TaskType.REDUCE,2);
TaskAttemptId laterAttempt=createTaskAttemptId(stampOne,2,2,TaskType.MAP,3);
TaskAttemptId earlierAttempt=createTaskAttemptId(stampOne,2,2,TaskType.MAP,1);
TaskAttemptId earlierTask=createTaskAttemptId(stampOne,2,1,TaskType.MAP,3);
TaskAttemptId duplicate=createTaskAttemptId(stampOne,2,2,TaskType.MAP,2);
// equals: identical coordinates are equal; any differing field is not.
assertTrue(base.equals(duplicate));
assertFalse(base.equals(reduceTwin));
assertFalse(base.equals(laterAttempt));
assertFalse(base.equals(earlierTask));
// compareTo is consistent with equals and with the field ordering.
assertTrue(base.compareTo(duplicate) == 0);
assertTrue(base.compareTo(reduceTwin) < 0);
assertTrue(base.compareTo(laterAttempt) < 0);
assertTrue(base.compareTo(earlierAttempt) > 0);
assertTrue(base.compareTo(earlierTask) > 0);
// hashCode agrees with equals; distinct ids hash differently here.
assertTrue(base.hashCode() == duplicate.hashCode());
assertFalse(base.hashCode() == reduceTwin.hashCode());
assertFalse(base.hashCode() == laterAttempt.hashCode());
assertFalse(base.hashCode() == earlierTask.hashCode());
TaskAttemptId large=createTaskAttemptId(stampTwo,5463346,4326575,TaskType.REDUCE,54375);
// toString zero-pads small ids but never truncates large ones.
assertEquals("attempt_" + stampOne + "_0002_m_000002_2",base.toString());
assertEquals("attempt_" + stampTwo + "_5463346_r_4326575_54375",large.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises equals/compareTo/hashCode/toString of TaskId. Ordering is by
 * job (timestamp, then job number), then task type (MAP before REDUCE),
 * then task number.
 */
@Test public void testTaskId(){
long stampOne=1315890136000l;
long stampTwo=1315890136001l;
TaskId base=createTaskId(stampOne,1,2,TaskType.MAP);
TaskId reduceTwin=createTaskId(stampOne,1,2,TaskType.REDUCE);
TaskId earlierTask=createTaskId(stampOne,1,1,TaskType.MAP);
TaskId duplicate=createTaskId(stampOne,1,2,TaskType.MAP);
TaskId laterStamp=createTaskId(stampTwo,1,1,TaskType.MAP);
// equals: identical coordinates are equal; any differing field is not.
assertTrue(base.equals(duplicate));
assertFalse(base.equals(reduceTwin));
assertFalse(base.equals(earlierTask));
assertFalse(base.equals(laterStamp));
// compareTo is consistent with equals and with the field ordering.
assertTrue(base.compareTo(duplicate) == 0);
assertTrue(base.compareTo(reduceTwin) < 0);
assertTrue(base.compareTo(earlierTask) > 0);
assertTrue(base.compareTo(laterStamp) < 0);
// hashCode agrees with equals; distinct ids hash differently here.
assertTrue(base.hashCode() == duplicate.hashCode());
assertFalse(base.hashCode() == reduceTwin.hashCode());
assertFalse(base.hashCode() == earlierTask.hashCode());
assertFalse(base.hashCode() == laterStamp.hashCode());
TaskId large=createTaskId(stampOne,324151,54643747,TaskType.REDUCE);
// toString zero-pads small ids but never truncates large ones.
assertEquals("task_" + stampOne + "_0001_m_000002",base.toString());
assertEquals("task_" + stampOne + "_324151_r_54643747",large.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises equals/compareTo/hashCode/toString of JobId. Ordering is by
 * cluster timestamp, then job number.
 */
@Test public void testJobId(){
long stampOne=1315890136000l;
long stampTwo=1315890136001l;
JobId base=createJobId(stampOne,2);
JobId earlier=createJobId(stampOne,1);
JobId laterStamp=createJobId(stampTwo,1);
JobId duplicate=createJobId(stampOne,2);
// equals: same timestamp and job number are equal; anything else is not.
assertTrue(base.equals(duplicate));
assertFalse(base.equals(earlier));
assertFalse(base.equals(laterStamp));
// compareTo is consistent with equals; timestamp dominates job number.
assertTrue(base.compareTo(duplicate) == 0);
assertTrue(base.compareTo(earlier) > 0);
assertTrue(base.compareTo(laterStamp) < 0);
// hashCode agrees with equals; distinct ids hash differently here.
assertTrue(base.hashCode() == duplicate.hashCode());
assertFalse(base.hashCode() == earlier.hashCode());
assertFalse(base.hashCode() == laterStamp.hashCode());
JobId large=createJobId(stampOne,231415);
// toString zero-pads to four digits but never truncates.
assertEquals("job_" + stampOne + "_0002",base.toString());
assertEquals("job_" + stampOne + "_231415",large.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With recovery disabled, a second AM generation does not resume the first
 * generation's tasks, but getAllAMInfos() must still report both
 * generations and preserve the first AM's start time.
 */
@Test public void testAMInfosWithoutRecoveryEnabled() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(1,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long firstAmStartTime=app.getAllAMInfos().get(0).getStartTime();
Assert.assertEquals("No of tasks not correct",1,job.getTasks().size());
Iterator taskIterator=job.getTasks().values().iterator();
Task mapTask=taskIterator.next();
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt taskAttempt=mapTask.getAttempts().values().iterator().next();
app.waitForState(taskAttempt,TaskAttemptState.RUNNING);
// Kill the first AM generation while the map attempt is still running.
app.stop();
// Second generation with recovery explicitly disabled.
app=new MRAppWithHistory(1,0,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,false);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",1,job.getTasks().size());
taskIterator=job.getTasks().values().iterator();
mapTask=taskIterator.next();
// Both AM generations must be reported, with the original start time.
List amInfos=app.getAllAMInfos();
Assert.assertEquals(2,amInfos.size());
AMInfo firstAmInfo=amInfos.get(0);
Assert.assertEquals(firstAmStartTime,firstAmInfo.getStartTime());
app.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A container-completed event arriving while the attempt is still only
 * ASSIGNED (the container was never used) must fail the attempt — and,
 * with maxAttempts == 1, the whole job.
 */
@Test public void testTaskFailWithUnusedContainer() throws Exception {
MRApp app=new MRAppWithFailingTaskAndUnusedContainer();
Configuration conf=new Configuration();
int maxAttempts=1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Map taskMap=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,taskMap.size());
Task onlyTask=taskMap.values().iterator().next();
app.waitForState(onlyTask,TaskState.SCHEDULED);
Map attemptMap=onlyTask.getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attemptMap.size());
TaskAttempt onlyAttempt=attemptMap.values().iterator().next();
// Deliver CONTAINER_COMPLETED while the attempt is merely ASSIGNED.
app.waitForInternalState((TaskAttemptImpl)onlyAttempt,TaskAttemptStateInternal.ASSIGNED);
app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(onlyAttempt.getID(),TaskAttemptEventType.TA_CONTAINER_COMPLETED));
app.waitForState(job,JobState.FAILED);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * First attempt fails, second succeeds: the task and the job still end
 * SUCCEEDED, with exactly two attempts recorded (FAILED then SUCCEEDED).
 */
@Test public void testFailTask() throws Exception {
MRApp app=new MockFirstFailingAttemptMRApp(1,0);
Configuration conf=new Configuration();
// Disable uberization so a fresh attempt can be scheduled after the failure.
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
Map taskMap=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,taskMap.size());
Task onlyTask=taskMap.values().iterator().next();
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,onlyTask.getReport().getTaskState());
Map attemptMap=onlyTask.getAttempts();
Assert.assertEquals("Num attempts is not correct",2,attemptMap.size());
Iterator attemptIter=attemptMap.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.FAILED,attemptIter.next().getReport().getTaskAttemptState());
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,attemptIter.next().getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A task that never reports progress times out; after MAP_MAX_ATTEMPTS
 * failed attempts the job itself ends FAILED, with every attempt FAILED.
 */
@Test public void testTimedOutTask() throws Exception {
MRApp app=new TimeOutTaskMRApp(1,0);
Configuration conf=new Configuration();
int maxAttempts=2;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.FAILED);
Map taskMap=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,taskMap.size());
Task onlyTask=taskMap.values().iterator().next();
Assert.assertEquals("Task state not correct",TaskState.FAILED,onlyTask.getReport().getTaskState());
Map attemptMap=onlyTask.getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attemptMap.size());
// Every attempt must have failed by timeout before the job gave up.
for ( TaskAttempt failedAttempt : attemptMap.values()) {
Assert.assertEquals("Attempt state not correct",TaskAttemptState.FAILED,failedAttempt.getReport().getTaskAttemptState());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetch failures with multiple reducers: two failure reports while the
 * other reducers are still in SHUFFLE do not fail the completed map
 * attempt; the third report — after those reducers moved to REDUCE —
 * does. The re-run map and the completion-event log (OBSOLETE/FAILED for
 * the first attempt, SUCCEEDED for its replacement and the reducers) are
 * then verified, including the map-only event view.
 */
@Test public void testFetchFailureMultipleReduces() throws Exception {
MRApp app=new MRApp(1,3,false,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",4,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
Task reduceTask2=it.next();
Task reduceTask3=it.next();
// Let the lone map attempt complete successfully first.
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
// Bring all three reduce attempts up and put them in the SHUFFLE phase.
app.waitForState(reduceTask,TaskState.RUNNING);
app.waitForState(reduceTask2,TaskState.RUNNING);
app.waitForState(reduceTask3,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt,Phase.SHUFFLE);
TaskAttempt reduceAttempt2=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt2,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt2,Phase.SHUFFLE);
TaskAttempt reduceAttempt3=reduceTask3.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt3,TaskAttemptState.RUNNING);
updateStatus(app,reduceAttempt3,Phase.SHUFFLE);
// Two failures while other reducers still shuffle: map stays SUCCEEDED.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
assertEquals(TaskState.SUCCEEDED,mapTask.getState());
// Once the other reducers reach REDUCE, the third failure fails the map.
updateStatus(app,reduceAttempt2,Phase.REDUCE);
updateStatus(app,reduceAttempt3,Phase.REDUCE);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
Assert.assertEquals("Map TaskAttempt state not correct",TaskAttemptState.FAILED,mapAttempt1.getState());
Assert.assertEquals("Num attempts in Map Task not correct",2,mapTask.getAttempts().size());
Iterator atIt=mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2=atIt.next();
// Finish the replacement map attempt and all reducers.
app.waitForState(mapAttempt2,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt3.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
// The previously fetched event object is now marked OBSOLETE in place.
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",6,events.length);
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt2.getID(),events[2].getAttemptId());
Assert.assertEquals("Event reduce attempt id not correct",reduceAttempt.getID(),events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.FAILED,events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",TaskAttemptCompletionEventStatus.SUCCEEDED,events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",TaskAttemptCompletionEventStatus.SUCCEEDED,events[3].getStatus());
// Map-only event view must match the converted full event list.
TaskCompletionEvent mapEvents[]=job.getMapAttemptCompletionEvents(0,2);
TaskCompletionEvent convertedEvents[]=TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events",2,mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",Arrays.copyOfRange(convertedEvents,0,2),mapEvents);
mapEvents=job.getMapAttemptCompletionEvents(2,200);
Assert.assertEquals("Incorrect number of map events",1,mapEvents.length);
Assert.assertEquals("Unexpected map event",convertedEvents[2],mapEvents[0]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A completed map attempt that accumulates three fetch-failure reports
 * from the reducer is failed and the map task is re-run; the completion
 * event log then shows OBSOLETE/FAILED for the first attempt and
 * SUCCEEDED for its replacement and the reducer, and the map-only event
 * view matches the converted full list.
 */
@Test public void testFetchFailure() throws Exception {
MRApp app=new MRApp(1,1,false,this.getClass().getName(),true);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
// Let the lone map attempt complete successfully first.
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
app.waitForState(reduceTask,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
// Three fetch-failure reports fail the completed map attempt and put
// the map task back to RUNNING with a fresh attempt.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
Assert.assertEquals("Map TaskAttempt state not correct",TaskAttemptState.FAILED,mapAttempt1.getState());
Assert.assertEquals("Num attempts in Map Task not correct",2,mapTask.getAttempts().size());
Iterator atIt=mapTask.getAttempts().values().iterator();
atIt.next();
TaskAttempt mapAttempt2=atIt.next();
// Finish the replacement map attempt and the reducer.
app.waitForState(mapAttempt2,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
// The previously fetched event object is now marked OBSOLETE in place.
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",4,events.length);
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[0].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[1].getAttemptId());
Assert.assertEquals("Event map attempt id not correct",mapAttempt2.getID(),events[2].getAttemptId());
// Message typo fixed: was "Event redude attempt id not correct".
Assert.assertEquals("Event reduce attempt id not correct",reduceAttempt.getID(),events[3].getAttemptId());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus());
Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.FAILED,events[1].getStatus());
Assert.assertEquals("Event status not correct for map attempt2",TaskAttemptCompletionEventStatus.SUCCEEDED,events[2].getStatus());
Assert.assertEquals("Event status not correct for reduce attempt1",TaskAttemptCompletionEventStatus.SUCCEEDED,events[3].getStatus());
TaskCompletionEvent mapEvents[]=job.getMapAttemptCompletionEvents(0,2);
TaskCompletionEvent convertedEvents[]=TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events",2,mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",Arrays.copyOfRange(convertedEvents,0,2),mapEvents);
mapEvents=job.getMapAttemptCompletionEvents(2,200);
Assert.assertEquals("Incorrect number of map events",1,mapEvents.length);
Assert.assertEquals("Unexpected map event",convertedEvents[2],mapEvents[0]);
}
InternalCallVerifier EqualityVerifier
/**
 * This tests that if a map attempt was failed (say due to fetch failures),
 * then it gets re-run. When the next map attempt is running, if the AM dies,
 * then, on AM re-run, the AM does not incorrectly remember the first failed
 * attempt. Currently recovery does not recover running tasks. Effectively,
 * the AM re-runs the maps from scratch.
 */
@Test public void testFetchFailureWithRecovery() throws Exception {
int runCount=0;
// First AM generation: complete the map, then fail it via fetch failures.
MRApp app=new MRAppWithHistory(1,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask,TaskState.RUNNING);
TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Num completion events not correct",1,events.length);
Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus());
app.waitForState(reduceTask,TaskState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
// Three fetch-failure reports fail the completed map attempt and send
// the map task back to RUNNING with a fresh attempt.
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
sendFetchFailure(app,reduceAttempt,mapAttempt1);
app.waitForState(mapTask,TaskState.RUNNING);
// Kill the AM while the re-run map attempt is in flight.
app.stop();
// Second AM generation with recovery enabled: the failed first attempt
// must not be remembered — the map runs again from scratch.
app=new MRAppWithHistory(1,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask=it.next();
reduceTask=it.next();
app.waitForState(mapTask,TaskState.RUNNING);
mapAttempt1=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
// Only the second generation's two successful attempts are reported.
Assert.assertEquals("Num completion events not correct",2,events.length);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * On a clean shutdown of the last AM retry the job-end notification must
 * be fired exactly once, carrying the final SUCCEEDED status.
 */
@Test public void testNotificationOnLastRetryNormalShutdown() throws Exception {
HttpServer2 notificationServer=startHttpServer();
MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,true,this.getClass().getName(),true,2,true));
// Keep the test JVM alive: MRApp would otherwise call System.exit().
doNothing().when(app).sysexit();
JobConf conf=new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job=(JobImpl)app.submit(conf);
app.waitForInternalState(job,JobStateInternal.SUCCEEDED);
app.shutDownJob();
// Exactly one notification with the terminal SUCCEEDED status.
Assert.assertTrue(app.isLastAMRetry());
Assert.assertEquals(1,JobEndServlet.calledTimes);
Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",JobEndServlet.requestUri.getQuery());
Assert.assertEquals(JobState.SUCCEEDED.toString(),JobEndServlet.foundJobState);
notificationServer.stop();
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * AM reboot while unregistration fails: even though the retry flag was
 * pre-set to true, it is re-evaluated to false and no job-end
 * notification reaches the servlet.
 */
@Test public void testNotificationOnLastRetryUnregistrationFailure() throws Exception {
HttpServer2 notificationServer=startHttpServer();
MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,2,false));
app.isLastAMRetry=true;
// Keep the test JVM alive: MRApp would otherwise call System.exit().
doNothing().when(app).sysexit();
JobConf conf=new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job=(JobImpl)app.submit(conf);
app.waitForState(job,JobState.RUNNING);
// Force a reboot and let the app service wind down.
app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT));
app.waitForInternalState(job,JobStateInternal.REBOOT);
app.waitForServiceToStop(10000);
// No notification may have been sent.
Assert.assertFalse(app.isLastAMRetry());
Assert.assertEquals(0,JobEndServlet.calledTimes);
Assert.assertNull(JobEndServlet.requestUri);
Assert.assertNull(JobEndServlet.foundJobState);
notificationServer.stop();
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When unregistration fails but this is NOT the last AM retry, no job-end
 * notification must be sent — a subsequent retry is expected to handle it.
 */
@Test public void testAbsentNotificationOnNotLastRetryUnregistrationFailure() throws Exception {
HttpServer2 server=startHttpServer();
// Allocator configured so this attempt is not the last retry — see ctor
// args; confirm against MRAppWithCustomContainerAllocator's signature.
MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,1,false));
// Keep the test JVM alive: MRApp would otherwise call System.exit().
doNothing().when(app).sysexit();
JobConf conf=new JobConf();
conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
JobImpl job=(JobImpl)app.submit(conf);
app.waitForState(job,JobState.RUNNING);
app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT));
app.waitForInternalState(job,JobStateInternal.REBOOT);
app.shutDownJob();
// NOTE(review): waiting for RUNNING after shutDownJob looks odd —
// presumably the externally visible state remains RUNNING because the
// AM will be retried; confirm against MRApp semantics.
app.waitForState(job,JobState.RUNNING);
Assert.assertFalse(app.isLastAMRetry());
// No notification may have been sent from a non-final retry.
Assert.assertEquals(0,JobEndServlet.calledTimes);
Assert.assertNull(JobEndServlet.requestUri);
Assert.assertNull(JobEndServlet.foundJobState);
server.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Killing one task attempt spawns a replacement and the job still
 * succeeds: the affected task ends with two attempts (KILLED then
 * SUCCEEDED) while the untouched task keeps a single SUCCEEDED attempt.
 */
@Test public void testKillTaskAttempt() throws Exception {
final CountDownLatch latch=new CountDownLatch(1);
MRApp app=new BlockingMRApp(2,0,latch);
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.RUNNING);
Map taskMap=job.getTasks();
Assert.assertEquals("No of tasks is not correct",2,taskMap.size());
Iterator taskIter=taskMap.values().iterator();
Task firstTask=taskIter.next();
Task secondTask=taskIter.next();
app.waitForState(firstTask,TaskState.SCHEDULED);
app.waitForState(secondTask,TaskState.SCHEDULED);
// Kill the first task's only attempt while the mappers are blocked.
TaskAttempt victim=firstTask.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(victim.getID(),TaskAttemptEventType.TA_KILL));
// Unblock the mappers so the job can run to completion.
latch.countDown();
app.waitForState(job,JobState.SUCCEEDED);
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,firstTask.getReport().getTaskState());
Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,secondTask.getReport().getTaskState());
Map attemptMap=firstTask.getAttempts();
Assert.assertEquals("No of attempts is not correct",2,attemptMap.size());
Iterator attemptIter=attemptMap.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,attemptIter.next().getReport().getTaskAttemptState());
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,attemptIter.next().getReport().getTaskAttemptState());
attemptMap=secondTask.getAttempts();
Assert.assertEquals("No of attempts is not correct",1,attemptMap.size());
attemptIter=attemptMap.values().iterator();
Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,attemptIter.next().getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testKillJob() throws Exception {
  // Fire JOB_KILL while the single task is blocked behind the latch:
  // job, task and attempt must all end in the KILLED state.
  final CountDownLatch releaseLatch = new CountDownLatch(1);
  MRApp mrApp = new BlockingMRApp(1, 0, releaseLatch);
  Job runningJob = mrApp.submit(new Configuration());
  mrApp.waitForState(runningJob, JobState.RUNNING);
  mrApp.getContext().getEventHandler().handle(
      new JobEvent(runningJob.getID(), JobEventType.JOB_KILL));
  releaseLatch.countDown();
  mrApp.waitForState(runningJob, JobState.KILLED);
  Map taskMap = runningJob.getTasks();
  Assert.assertEquals("No of tasks is not correct", 1, taskMap.size());
  Task onlyTask = (Task) taskMap.values().iterator().next();
  Assert.assertEquals("Task state not correct", TaskState.KILLED, onlyTask.getReport().getTaskState());
  Map attemptMap = onlyTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attemptMap.size());
  TaskAttempt onlyAttempt = (TaskAttempt) attemptMap.values().iterator().next();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED, onlyAttempt.getReport().getTaskAttemptState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testKillTask() throws Exception {
  // Kill a whole task (T_KILL, not an attempt-level kill): the killed task is
  // not retried, while the sibling task and the job still succeed.
  final CountDownLatch releaseLatch = new CountDownLatch(1);
  MRApp mrApp = new BlockingMRApp(2, 0, releaseLatch);
  Job runningJob = mrApp.submit(new Configuration());
  mrApp.waitForState(runningJob, JobState.RUNNING);
  Map taskMap = runningJob.getTasks();
  Assert.assertEquals("No of tasks is not correct", 2, taskMap.size());
  Iterator taskIter = taskMap.values().iterator();
  Task killedTask = (Task) taskIter.next();
  Task survivingTask = (Task) taskIter.next();
  mrApp.getContext().getEventHandler().handle(
      new TaskEvent(killedTask.getID(), TaskEventType.T_KILL));
  releaseLatch.countDown();
  mrApp.waitForState(runningJob, JobState.SUCCEEDED);
  Assert.assertEquals("Task state not correct", TaskState.KILLED, killedTask.getReport().getTaskState());
  Assert.assertEquals("Task state not correct", TaskState.SUCCEEDED, survivingTask.getReport().getTaskState());
  // The killed task keeps its single (killed) attempt; no retry is scheduled.
  Map attemptMap = killedTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attemptMap.size());
  TaskAttempt killedAttempt = (TaskAttempt) attemptMap.values().iterator().next();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.KILLED, killedAttempt.getReport().getTaskAttemptState());
  attemptMap = survivingTask.getAttempts();
  Assert.assertEquals("No of attempts is not correct", 1, attemptMap.size());
  TaskAttempt successfulAttempt = (TaskAttempt) attemptMap.values().iterator().next();
  Assert.assertEquals("Attempt state not correct", TaskAttemptState.SUCCEEDED, successfulAttempt.getReport().getTaskAttemptState());
}
InternalCallVerifier EqualityVerifier
// Queues a JOB_KILL behind a latched TA_DONE for the map attempt and verifies
// the job still reaches the internal KILLED state once the latch is released.
@Test public void testKillTaskWaitKillJobAfterTA_DONE() throws Exception {
CountDownLatch latch=new CountDownLatch(1);
// MyAsyncDispatch presumably holds back dispatch of TA_DONE events until the
// latch counts down — confirm against MyAsyncDispatch's definition.
final Dispatcher dispatcher=new MyAsyncDispatch(latch,TaskAttemptEventType.TA_DONE);
MRApp app=new MRApp(1,1,false,this.getClass().getName(),true){
@Override public Dispatcher createDispatcher(){
return dispatcher;
}
}
;
Job job=app.submit(new Configuration());
JobId jobId=app.getJobId();
app.waitForState(job,JobState.RUNNING);
// One map task plus one reduce task.
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask,TaskState.RUNNING);
app.waitForState(reduceTask,TaskState.RUNNING);
TaskAttempt mapAttempt=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt,TaskAttemptState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
// Enqueue TA_DONE (held by the dispatcher) and then JOB_KILL behind it.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new JobEvent(jobId,JobEventType.JOB_KILL));
// Release the held TA_DONE; the kill must still take effect.
latch.countDown();
app.waitForInternalState((JobImpl)job,JobStateInternal.KILLED);
}
InternalCallVerifier EqualityVerifier
// Exercises the kill-vs-success race on a reduce attempt: the custom
// dispatcher intercepts the first reduce attempt's TA_KILL, drives the attempt
// through the success event sequence first, and replays the kill only after
// T_ATTEMPT_SUCCEEDED is seen. The job must still end up internally KILLED.
@Test public void testKillTaskWait() throws Exception {
final Dispatcher dispatcher=new AsyncDispatcher(){
// Intercepted TA_KILL, replayed after the attempt is marked successful.
private TaskAttemptEvent cachedKillEvent;
@Override protected void dispatch( Event event){
if (event instanceof TaskAttemptEvent) {
TaskAttemptEvent killEvent=(TaskAttemptEvent)event;
if (killEvent.getType() == TaskAttemptEventType.TA_KILL) {
TaskAttemptId taID=killEvent.getTaskAttemptID();
// Only intercept attempt 0 of reduce task 0; all other kills pass through.
if (taID.getTaskId().getTaskType() == TaskType.REDUCE && taID.getTaskId().getId() == 0 && taID.getId() == 0) {
// Drive the attempt to completion before the kill is delivered.
super.dispatch(new TaskAttemptEvent(taID,TaskAttemptEventType.TA_DONE));
super.dispatch(new TaskAttemptEvent(taID,TaskAttemptEventType.TA_CONTAINER_CLEANED));
super.dispatch(new TaskTAttemptEvent(taID,TaskEventType.T_ATTEMPT_SUCCEEDED));
this.cachedKillEvent=killEvent;
return;
}
}
}
else if (event instanceof TaskEvent) {
TaskEvent taskEvent=(TaskEvent)event;
// Once the success lands, replay the cached kill against the attempt.
if (taskEvent.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED && this.cachedKillEvent != null) {
super.dispatch(this.cachedKillEvent);
return;
}
}
super.dispatch(event);
}
}
;
MRApp app=new MRApp(1,1,false,this.getClass().getName(),true){
@Override public Dispatcher createDispatcher(){
return dispatcher;
}
}
;
Job job=app.submit(new Configuration());
JobId jobId=app.getJobId();
app.waitForState(job,JobState.RUNNING);
// One map task plus one reduce task.
Assert.assertEquals("Num tasks not correct",2,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask,TaskState.RUNNING);
app.waitForState(reduceTask,TaskState.RUNNING);
TaskAttempt mapAttempt=mapTask.getAttempts().values().iterator().next();
app.waitForState(mapAttempt,TaskAttemptState.RUNNING);
TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next();
app.waitForState(reduceAttempt,TaskAttemptState.RUNNING);
// Finish the map normally, then kill the job; the JOB_KILL fans out a TA_KILL
// to the running reduce attempt, which the dispatcher above intercepts.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask,TaskState.SUCCEEDED);
app.getContext().getEventHandler().handle(new JobEvent(jobId,JobEventType.JOB_KILL));
app.waitForInternalState((JobImpl)job,JobStateInternal.KILLED);
}
InternalCallVerifier EqualityVerifier
@Test public void testJobRebootNotLastRetryOnUnregistrationFailure() throws Exception {
  // A JOB_AM_REBOOT on a non-final AM attempt must leave the job externally RUNNING.
  MRApp mrApp = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job runningJob = mrApp.submit(new Configuration());
  mrApp.waitForState(runningJob, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, runningJob.getTasks().size());
  Task onlyTask = (Task) runningJob.getTasks().values().iterator().next();
  mrApp.waitForState(onlyTask, TaskState.RUNNING);
  mrApp.getContext().getEventHandler().handle(
      new JobEvent(runningJob.getID(), JobEventType.JOB_AM_REBOOT));
  mrApp.waitForState(runningJob, JobState.RUNNING);
}
InternalCallVerifier EqualityVerifier
@Test public void testJobRebootOnLastRetryOnUnregistrationFailure() throws Exception {
  // NOTE(review): the extra "2, false" ctor args presumably configure the last
  // AM retry with failing unregistration — confirm against the MRApp ctor.
  // A JOB_AM_REBOOT then drives the internal state to REBOOT while the
  // externally visible state stays RUNNING.
  MRApp mrApp = new MRApp(1, 0, false, this.getClass().getName(), true, 2, false);
  Configuration jobConf = new Configuration();
  Job runningJob = mrApp.submit(jobConf);
  mrApp.waitForState(runningJob, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, runningJob.getTasks().size());
  Task onlyTask = (Task) runningJob.getTasks().values().iterator().next();
  mrApp.waitForState(onlyTask, TaskState.RUNNING);
  mrApp.getContext().getEventHandler().handle(
      new JobEvent(runningJob.getID(), JobEventType.JOB_AM_REBOOT));
  mrApp.waitForInternalState((JobImpl) runningJob, JobStateInternal.REBOOT);
  mrApp.waitForState(runningJob, JobState.RUNNING);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies the Container handed to the ContainerLauncher is the very same
// object later recorded on the TaskAttemptImpl.
@Test public void testContainerPassThrough() throws Exception {
MRApp app=new MRApp(0,1,true,this.getClass().getName(),true){
@Override protected ContainerLauncher createContainerLauncher( AppContext context){
return new MockContainerLauncher(){
@Override public void handle( ContainerLauncherEvent event){
// Capture the allocated container into an outer field for the
// identity check after the job finishes.
if (event instanceof ContainerRemoteLaunchEvent) {
containerObtainedByContainerLauncher=((ContainerRemoteLaunchEvent)event).getAllocatedContainer();
}
super.handle(event);
}
}
;
}
}
;
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
Collection tasks=job.getTasks().values();
Collection taskAttempts=tasks.iterator().next().getAttempts().values();
TaskAttemptImpl taskAttempt=(TaskAttemptImpl)taskAttempts.iterator().next();
// Reference equality on purpose: the same Container instance must pass through.
Assert.assertTrue(taskAttempt.container == containerObtainedByContainerLauncher);
}
InternalCallVerifier EqualityVerifier
@Test public void testJobError() throws Exception {
  // An invalid transition (T_SCHEDULE on an already-RUNNING task) must drive
  // the whole job to the ERROR state.
  MRApp mrApp = new MRApp(1, 0, false, this.getClass().getName(), true);
  Job runningJob = mrApp.submit(new Configuration());
  mrApp.waitForState(runningJob, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, runningJob.getTasks().size());
  Task onlyTask = (Task) runningJob.getTasks().values().iterator().next();
  mrApp.waitForState(onlyTask, TaskState.RUNNING);
  // Re-send T_SCHEDULE to the running task — an illegal event for its state.
  mrApp.getContext().getEventHandler().handle(
      new TaskEvent(onlyTask.getID(), TaskEventType.T_SCHEDULE));
  mrApp.waitForState(runningJob, JobState.ERROR);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The test verifies that the AM re-runs maps that have run on bad nodes. It
 * also verifies that the AM records all success/killed events so that reduces
 * are notified about map output status changes. It also verifies that the
 * re-run information is preserved across AM restart
 */
@Test public void testUpdatedNodes() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(2,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
// Reduces become eligible once half the maps have completed.
conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,0.5f);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
// 2 maps + 2 reduces.
Assert.assertEquals("Num tasks not correct",4,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
// Both first attempts run on the same node (asserted below), so one bad-node
// report will invalidate both.
NodeId node1=task1Attempt.getNodeId();
NodeId node2=task2Attempt.getNodeId();
Assert.assertEquals(node1,node2);
// Finish both map attempts successfully.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 completion events for success",2,events.length);
// Report the shared node as UNHEALTHY: both completed attempts must be
// killed and their maps re-scheduled.
ArrayList updatedNodes=new ArrayList();
NodeReport nr=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(NodeReport.class);
nr.setNodeId(node1);
nr.setNodeState(NodeState.UNHEALTHY);
updatedNodes.add(nr);
app.getContext().getEventHandler().handle(new JobUpdatedNodesEvent(job.getID(),updatedNodes));
app.waitForState(task1Attempt,TaskAttemptState.KILLED);
app.waitForState(task2Attempt,TaskAttemptState.KILLED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 more completion events for killed",4,events.length);
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Complete map1's second (re-run) attempt before stopping the AM; map2's
// re-run is deliberately left unfinished.
Iterator itr=mapTask1.getAttempts().values().iterator();
itr.next();
task1Attempt=itr.next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 1 more completion events for success",5,events.length);
// Restart the AM with recovery enabled; re-run information must survive.
app.stop();
app=new MRAppWithHistory(2,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",4,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
Task reduceTask1=it.next();
Task reduceTask2=it.next();
// map1 is recovered as finished; map2 must run again.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 completion events for killed & success of map1",2,events.length);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 1 more completion events for success",3,events.length);
// Finish both reduces; a late TA_KILL on an already-succeeded reduce attempt
// must not change its state.
app.waitForState(reduceTask1,TaskState.RUNNING);
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt task3Attempt=reduceTask1.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
TaskAttempt task4Attempt=reduceTask2.getAttempts().values().iterator().next();
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task4Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
events=job.getTaskAttemptCompletionEvents(0,100);
Assert.assertEquals("Expecting 2 more completion events for reduce success",5,events.length);
app.waitForState(job,JobState.SUCCEEDED);
}
InternalCallVerifier EqualityVerifier
@Test public void testMapReduce() throws Exception {
  // Full 2-map / 2-reduce run straight through to completion.
  MRApp mrApp = new MRApp(2, 2, true, this.getClass().getName(), true);
  Job finishedJob = mrApp.submit(new Configuration());
  mrApp.waitForState(finishedJob, JobState.SUCCEEDED);
  mrApp.verifyCompleted();
  // The job must be attributed to the submitting OS user.
  Assert.assertEquals(System.getProperty("user.name"), finishedJob.getUserName());
}
InternalCallVerifier EqualityVerifier
@SuppressWarnings("resource") @Test public void testJobSuccess() throws Exception {
  // Until the AM has successfully unregistered, a job that is internally
  // SUCCEEDED must still report RUNNING to clients.
  MRApp mrApp = new MRApp(2, 2, true, this.getClass().getName(), true, false);
  JobImpl jobImpl = (JobImpl) mrApp.submit(new Configuration());
  mrApp.waitForInternalState(jobImpl, JobStateInternal.SUCCEEDED);
  Assert.assertEquals(JobState.RUNNING, jobImpl.getState());
  // Flip the unregistration flag; only now may the external state advance.
  mrApp.successfullyUnregistered.set(true);
  mrApp.waitForState(jobImpl, JobState.SUCCEEDED);
}
InternalCallVerifier EqualityVerifier
// Walks an attempt through COMMIT_PENDING and verifies a duplicate
// TA_COMMIT_PENDING signal is harmless before the attempt completes.
@Test public void testCommitPending() throws Exception {
MRApp app=new MRApp(1,0,false,this.getClass().getName(),true);
Job job=app.submit(new Configuration());
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",1,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task task=it.next();
app.waitForState(task,TaskState.RUNNING);
TaskAttempt attempt=task.getAttempts().values().iterator().next();
app.waitForState(attempt,TaskAttemptState.RUNNING);
// Send the commit-pending signal.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(),TaskAttemptEventType.TA_COMMIT_PENDING));
app.waitForState(attempt,TaskAttemptState.COMMIT_PENDING);
// Deliberately re-send the same signal: the attempt must stay COMMIT_PENDING
// rather than treat the duplicate as an invalid transition.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(),TaskAttemptEventType.TA_COMMIT_PENDING));
app.waitForState(attempt,TaskAttemptState.COMMIT_PENDING);
// Complete the attempt and let the job finish.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
}
InternalCallVerifier EqualityVerifier
@Test(timeout=20000) public void testComponentStopOrder() throws Exception {
  // Run a trivial job, then check the recorded stop order of the two tracked
  // services: the history event handler must stop first, the client service second.
  @SuppressWarnings("resource") TestMRApp testApp = new TestMRApp(1, 1, true, this.getClass().getName(), true);
  JobImpl jobImpl = (JobImpl) testApp.submit(new Configuration());
  testApp.waitForState(jobImpl, JobState.SUCCEEDED);
  testApp.verifyCompleted();
  // Poll for up to 20s until both services have recorded their stop.
  for (int remainingMs = 20 * 1000; remainingMs > 0 && testApp.numStops < 2; remainingMs -= 100) {
    Thread.sleep(100);
  }
  Assert.assertEquals(1, testApp.JobHistoryEventHandlerStopped);
  Assert.assertEquals(2, testApp.clientServiceStopped);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMRAppMasterFailLock() throws IOException, InterruptedException {
  // Both the start-commit and commit-FAILURE marker files exist, so the AM
  // must force the FAILED state instead of re-running the job.
  String attemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String user = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Create both commit marker files in the staging area.
  Path startFile = MRApps.getStartJobCommitFile(conf, user, jobId);
  Path failFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
  FileSystem fs = FileSystem.get(conf);
  fs.create(startFile).close();
  fs.create(failFile).close();
  ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
  MRAppMaster appMaster = new MRAppMasterTest(attemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, false);
  boolean sawException = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, conf, user);
  } catch (IOException e) {
    // Startup is expected to abort once the failure marker is seen.
    LOG.info("Caught expected Exception", e);
    sawException = true;
  }
  assertTrue(sawException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.FAILED, appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Both the start-commit and commit-SUCCESS marker files exist: the AM must
// conclude the previous attempt's commit completed and force the SUCCEEDED
// state instead of re-running the job.
@Test public void testMRAppMasterSuccessLock() throws IOException, InterruptedException {
String applicationAttemptIdStr="appattempt_1317529182569_0004_000002";
String containerIdStr="container_1317529182569_0004_000002_1";
String userName="TestAppMasterUser";
JobConf conf=new JobConf();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
ApplicationAttemptId applicationAttemptId=ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr);
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId()));
// Create both commit marker files in the staging area.
Path start=MRApps.getStartJobCommitFile(conf,userName,jobId);
Path end=MRApps.getEndJobCommitSuccessFile(conf,userName,jobId);
FileSystem fs=FileSystem.get(conf);
fs.create(start).close();
fs.create(end).close();
ContainerId containerId=ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,false);
boolean caught=false;
try {
MRAppMaster.initAndStartAppMaster(appMaster,conf,userName);
}
catch ( IOException e) {
// Startup is expected to abort: the job already committed successfully.
LOG.info("Caught expected Exception",e);
caught=true;
}
assertTrue(caught);
assertTrue(appMaster.errorHappenedShutDown);
assertEquals(JobStateInternal.SUCCEEDED,appMaster.forcedState);
appMaster.stop();
// NOTE(review): the helper is named verifyFailedStatus yet is used here to
// check the forced "SUCCEEDED" status — confirm the helper's semantics.
verifyFailedStatus((MRAppMasterTest)appMaster,"SUCCEEDED");
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testMRAppMasterCredentials() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
Credentials credentials=new Credentials();
byte[] identifier="MyIdentifier".getBytes();
byte[] password="MyPassword".getBytes();
Text kind=new Text("MyTokenKind");
Text service=new Text("host:port");
Token extends TokenIdentifier> myToken=new Token(identifier,password,kind,service);
Text tokenAlias=new Text("myToken");
credentials.addToken(tokenAlias,myToken);
Text appTokenService=new Text("localhost:0");
Token appToken=new Token(identifier,password,AMRMTokenIdentifier.KIND_NAME,appTokenService);
credentials.addToken(appTokenService,appToken);
Text keyAlias=new Text("mySecretKeyAlias");
credentials.addSecretKey(keyAlias,"mySecretKey".getBytes());
Token extends TokenIdentifier> storedToken=credentials.getToken(tokenAlias);
JobConf conf=new JobConf();
Path tokenFilePath=new Path(testDir.getAbsolutePath(),"tokens-file");
Map newEnv=new HashMap();
newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,tokenFilePath.toUri().getPath());
setNewEnvironmentHack(newEnv);
credentials.writeTokenStorageFile(tokenFilePath,conf);
ApplicationId appId=ApplicationId.newInstance(12345,56);
ApplicationAttemptId applicationAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId containerId=ContainerId.newInstance(applicationAttemptId,546);
String userName=UserGroupInformation.getCurrentUser().getShortUserName();
File stagingDir=new File(MRApps.getStagingAreaDir(conf,userName).toString());
stagingDir.mkdirs();
UserGroupInformation.setLoginUser(null);
MRAppMasterTest appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,true);
MRAppMaster.initAndStartAppMaster(appMaster,conf,userName);
Credentials appMasterCreds=appMaster.getCredentials();
Assert.assertNotNull(appMasterCreds);
Assert.assertEquals(1,appMasterCreds.numberOfSecretKeys());
Assert.assertEquals(1,appMasterCreds.numberOfTokens());
Token extends TokenIdentifier> usedToken=appMasterCreds.getToken(tokenAlias);
Assert.assertNotNull(usedToken);
Assert.assertEquals(storedToken,usedToken);
byte[] usedKey=appMasterCreds.getSecretKey(keyAlias);
Assert.assertNotNull(usedKey);
Assert.assertEquals("mySecretKey",new String(usedKey));
Credentials confCredentials=conf.getCredentials();
Assert.assertEquals(1,confCredentials.numberOfSecretKeys());
Assert.assertEquals(1,confCredentials.numberOfTokens());
Assert.assertEquals(storedToken,confCredentials.getToken(tokenAlias));
Assert.assertEquals("mySecretKey",new String(confCredentials.getSecretKey(keyAlias)));
Credentials ugiCredentials=appMaster.getUgi().getCredentials();
Assert.assertEquals(1,ugiCredentials.numberOfSecretKeys());
Assert.assertEquals(2,ugiCredentials.numberOfTokens());
Assert.assertEquals(storedToken,ugiCredentials.getToken(tokenAlias));
Assert.assertEquals(appToken,ugiCredentials.getToken(appTokenService));
Assert.assertEquals("mySecretKey",new String(ugiCredentials.getSecretKey(keyAlias)));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMRAppMasterMidLock() throws IOException, InterruptedException {
  // Only the start-commit marker exists — no success/failure end marker — so
  // the AM cannot tell whether the commit completed and must force ERROR.
  String attemptIdStr = "appattempt_1317529182569_0004_000002";
  String containerIdStr = "container_1317529182569_0004_000002_1";
  String user = "TestAppMasterUser";
  JobConf conf = new JobConf();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
  // Create only the start-commit marker file.
  Path startFile = MRApps.getStartJobCommitFile(conf, user, jobId);
  FileSystem fs = FileSystem.get(conf);
  fs.create(startFile).close();
  ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
  MRAppMaster appMaster = new MRAppMasterTest(attemptId, containerId, "host", -1, -1, System.currentTimeMillis(), false, false);
  boolean sawException = false;
  try {
    MRAppMaster.initAndStartAppMaster(appMaster, conf, user);
  } catch (IOException e) {
    // Startup is expected to abort on the ambiguous commit state.
    LOG.info("Caught expected Exception", e);
    sawException = true;
  }
  assertTrue(sawException);
  assertTrue(appMaster.errorHappenedShutDown);
  assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
  appMaster.stop();
  verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// End-to-end exercise of the MRClientProtocol RPC surface against a live job:
// counters, job/task/attempt reports, completion events and diagnostics.
@Test public void test() throws Exception {
MRAppWithClientService app=new MRAppWithClientService(1,0,false);
Configuration conf=new Configuration();
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("Num tasks not correct",1,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task task=it.next();
app.waitForState(task,TaskState.RUNNING);
TaskAttempt attempt=task.getAttempts().values().iterator().next();
app.waitForState(attempt,TaskAttemptState.RUNNING);
// Push one diagnostic and a status update so the RPC queries have data.
String diagnostic1="Diagnostic1";
String diagnostic2="Diagnostic2";
app.getContext().getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(),diagnostic1));
TaskAttemptStatus taskAttemptStatus=new TaskAttemptStatus();
taskAttemptStatus.id=attempt.getID();
taskAttemptStatus.progress=0.5f;
taskAttemptStatus.stateString="RUNNING";
taskAttemptStatus.taskState=TaskAttemptState.RUNNING;
taskAttemptStatus.phase=Phase.MAP;
app.getContext().getEventHandler().handle(new TaskAttemptStatusUpdateEvent(attempt.getID(),taskAttemptStatus));
// Connect to the AM's client service over RPC.
YarnRPC rpc=YarnRPC.create(conf);
MRClientProtocol proxy=(MRClientProtocol)rpc.getProxy(MRClientProtocol.class,app.clientService.getBindAddress(),conf);
// Exercise each query RPC in turn.
GetCountersRequest gcRequest=recordFactory.newRecordInstance(GetCountersRequest.class);
gcRequest.setJobId(job.getID());
Assert.assertNotNull("Counters is null",proxy.getCounters(gcRequest).getCounters());
GetJobReportRequest gjrRequest=recordFactory.newRecordInstance(GetJobReportRequest.class);
gjrRequest.setJobId(job.getID());
JobReport jr=proxy.getJobReport(gjrRequest).getJobReport();
verifyJobReport(jr);
GetTaskAttemptCompletionEventsRequest gtaceRequest=recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
gtaceRequest.setJobId(job.getID());
gtaceRequest.setFromEventId(0);
gtaceRequest.setMaxEvents(10);
Assert.assertNotNull("TaskCompletionEvents is null",proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList());
GetDiagnosticsRequest gdRequest=recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
gdRequest.setTaskAttemptId(attempt.getID());
Assert.assertNotNull("Diagnostics is null",proxy.getDiagnostics(gdRequest).getDiagnosticsList());
GetTaskAttemptReportRequest gtarRequest=recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
gtarRequest.setTaskAttemptId(attempt.getID());
TaskAttemptReport tar=proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
verifyTaskAttemptReport(tar);
GetTaskReportRequest gtrRequest=recordFactory.newRecordInstance(GetTaskReportRequest.class);
gtrRequest.setTaskId(task.getID());
Assert.assertNotNull("TaskReport is null",proxy.getTaskReport(gtrRequest).getTaskReport());
GetTaskReportsRequest gtreportsRequest=recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(job.getID());
gtreportsRequest.setTaskType(TaskType.MAP);
Assert.assertNotNull("TaskReports for map is null",proxy.getTaskReports(gtreportsRequest).getTaskReportList());
gtreportsRequest=recordFactory.newRecordInstance(GetTaskReportsRequest.class);
gtreportsRequest.setJobId(job.getID());
gtreportsRequest.setTaskType(TaskType.REDUCE);
Assert.assertNotNull("TaskReports for reduce is null",proxy.getTaskReports(gtreportsRequest).getTaskReportList());
// Only diagnostic1 was delivered; diagnostic2 is intentionally never sent,
// so exactly one diagnostic must come back.
List diag=proxy.getDiagnostics(gdRequest).getDiagnosticsList();
Assert.assertEquals("Num diagnostics not correct",1,diag.size());
Assert.assertEquals("Diag 1 not correct",diagnostic1,diag.get(0).toString());
TaskReport taskReport=proxy.getTaskReport(gtrRequest).getTaskReport();
Assert.assertEquals("Num diagnostics not correct",1,taskReport.getDiagnosticsCount());
// Let the attempt finish and the job complete.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A user granted only JOB_ACL_VIEW_JOB must be able to view the job but be
 * rejected with AccessControlException on every modify RPC: killJob,
 * killTask, killTaskAttempt and failTaskAttempt.
 *
 * Fixes: the fail() message on the failTaskAttempt path wrongly said
 * "killed task attempt"; the raw PrivilegedExceptionAction is parameterized
 * (its anonymous run() returns MRClientProtocol).
 */
@Test public void testViewAclOnlyCannotModify() throws Exception {
  final MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
  final Configuration conf = new Configuration();
  conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser");
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
  Iterator it = job.getTasks().values().iterator();
  Task task = (Task) it.next();
  app.waitForState(task, TaskState.RUNNING);
  TaskAttempt attempt = (TaskAttempt) task.getAttempts().values().iterator().next();
  app.waitForState(attempt, TaskAttemptState.RUNNING);
  UserGroupInformation viewOnlyUser = UserGroupInformation.createUserForTesting("viewonlyuser", new String[]{});
  Assert.assertTrue("viewonlyuser cannot view job", job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB));
  Assert.assertFalse("viewonlyuser can modify job", job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB));
  // Open a client-protocol proxy as the view-only user.
  MRClientProtocol client = viewOnlyUser.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
    @Override public MRClientProtocol run() throws Exception {
      YarnRPC rpc = YarnRPC.create(conf);
      return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, app.clientService.getBindAddress(), conf);
    }
  });
  KillJobRequest killJobRequest = recordFactory.newRecordInstance(KillJobRequest.class);
  killJobRequest.setJobId(app.getJobId());
  try {
    client.killJob(killJobRequest);
    fail("viewonlyuser killed job");
  } catch (AccessControlException e) {
    // expected: modify access denied
  }
  KillTaskRequest killTaskRequest = recordFactory.newRecordInstance(KillTaskRequest.class);
  killTaskRequest.setTaskId(task.getID());
  try {
    client.killTask(killTaskRequest);
    fail("viewonlyuser killed task");
  } catch (AccessControlException e) {
    // expected: modify access denied
  }
  KillTaskAttemptRequest killTaskAttemptRequest = recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
  killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.killTaskAttempt(killTaskAttemptRequest);
    fail("viewonlyuser killed task attempt");
  } catch (AccessControlException e) {
    // expected: modify access denied
  }
  FailTaskAttemptRequest failTaskAttemptRequest = recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
  failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
  try {
    client.failTaskAttempt(failTaskAttemptRequest);
    // Fixed message: this path exercises failTaskAttempt, not kill.
    fail("viewonlyuser failed task attempt");
  } catch (AccessControlException e) {
    // expected: modify access denied
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// First AM run completes the map and the first reduce (writing its output),
// then stops; a second AM with recovery enabled must recover both completed
// tasks — including the map attempt's shuffle port — run only the remaining
// reduce, and produce valid final output.
@Test public void testOutputRecovery() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(1,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
// 1 map + 2 reduces.
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task reduceTask1=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
// Finish the map and remember its (mock) shuffle port for the recovery check.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Finish the first reduce, writing real output so recovery can be validated.
app.waitForState(reduceTask1,TaskState.RUNNING);
TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next();
writeOutput(reduce1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// Restart the AM with recovery enabled.
app.stop();
app=new MRAppWithHistory(1,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
reduceTask1=it.next();
Task reduceTask2=it.next();
// Map and first reduce are recovered as finished; shuffle port survives.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
task1Attempt1=mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// Only the second reduce actually runs after recovery.
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt reduce2Attempt=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduce2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * AM with 2 maps and 1 reduce, run via MRAppNoShuffleSecret. The AM is
 * stopped after the first map succeeds; in the second generation both maps
 * come back RUNNING (the finished map is not restored as SUCCEEDED) and are
 * driven to completion again before the reduce finishes and the job succeeds.
 * @throws Exception
 */
@Test(timeout=30000) public void testRecoveryWithoutShuffleSecret() throws Exception {
int runCount=0;
// First generation.
MRApp app=new MRAppNoShuffleSecret(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
// Uber mode would run everything inside the AM and bypass this scenario.
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
// Task iteration order here: map1, map2, reduce.
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Complete only the first map, then simulate an AM crash.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.stop();
// Second generation with recovery enabled in the config.
app=new MRAppNoShuffleSecret(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Both maps are expected RUNNING again: the previously-completed map was
// not recovered as SUCCEEDED, so it must rerun in this generation.
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
// Finish map2, then map1, then the reduce.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
InternalCallVerifier EqualityVerifier
/**
 * This test case primarily verifies if the recovery is controlled through config
 * property. In this case, recovery is turned ON. AM with 3 maps and 0 reduce.
 * AM crashes after the first two tasks finishes and recovers completely and
 * succeeds in the second generation.
 * @throws Exception
 */
@Test public void testRecoverySuccessUsingCustomOutputCommitter() throws Exception {
int runCount=0;
// First generation: 3 maps, 0 reduces, history enabled.
MRApp app=new MRAppWithHistory(3,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
// Custom committer whose recovery support is toggled by "want.am.recovery".
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean("want.am.recovery",true);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task mapTask3=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Finish the first two maps, then crash the AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.stop();
// Second generation: committer says recovery is supported.
// NOTE(review): the app is constructed with 2 maps/1 reduce while
// NUM_REDUCES is forced to 0 below; the recovered job still reports 3
// tasks, so the constructor counts appear to be ignored on recovery --
// confirm against MRAppWithHistory.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.setBoolean("want.am.recovery",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setInt(MRJobConfig.NUM_REDUCES,0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
mapTask3=it.next();
// The two finished maps must be restored as SUCCEEDED without rerunning.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(mapTask3,TaskState.RUNNING);
task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask3,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * AM with 2 maps and 1 reduce. The first map writes bad output and succeeds,
 * then the AM crashes. The second generation recovers the finished map --
 * including its shuffle port (5467) -- runs the remaining map and reduce,
 * and validateOutput() passes, i.e. the bad map-side output is presumably
 * never committed to the final result.
 * @throws Exception
 */
@Test public void testOutputRecoveryMapsOnly() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask1=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
// Deliberately write bad output for the map before it succeeds.
writeBadOutput(task1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Crash the AM after the first map completes.
app.stop();
// Second generation with recovery enabled.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask1=it.next();
// Map1 is recovered as SUCCEEDED; its shuffle port must survive recovery.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
task1Attempt1=mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Run the second map to completion in this generation.
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task2Attempt1=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task2Attempt1.getShufflePort());
// Reduce writes the real output, which is validated at the end.
app.waitForState(reduceTask1,TaskState.RUNNING);
TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next();
writeOutput(reduce1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM with 2 maps and 1 reduce. A speculative attempt is added for the 1st
 * map while its original attempt is still running; the original attempt
 * succeeds. The AM crashes after the 1st map finishes and recovers
 * completely, succeeding in the second generation; job/task start and
 * finish times and the AMInfos are preserved across the restart.
 * @throws Exception
 */
@Test public void testSpeculative() throws Exception {
int runCount=0;
// Lower bound for the first AM's start time.
long am1StartTimeEst=System.currentTimeMillis();
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
// Ask the task to spawn a speculative attempt for map1.
app.getContext().getEventHandler().handle(new TaskEvent(mapTask1.getID(),TaskEventType.T_ADD_SPEC_ATTEMPT));
// Poll (up to ~10s) until the speculative attempt shows up.
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(1000);
LOG.info("Waiting for next attempt to start");
}
Iterator t1it=mapTask1.getAttempts().values().iterator();
TaskAttempt task1Attempt1=t1it.next();
TaskAttempt task1Attempt2=t1it.next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
ContainerId t1a2contId=task1Attempt2.getAssignedContainerID();
LOG.info(t1a2contId.toString());
LOG.info(task1Attempt1.getID().toString());
LOG.info(task1Attempt2.getID().toString());
// Manually mark the speculative attempt's container as launched.
// NOTE(review): the second argument looks like a shuffle port stand-in
// (here reusing runCount) -- confirm against TaskAttemptContainerLaunchedEvent.
app.getContext().getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(),runCount));
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task1Attempt2,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// The original attempt wins; the task succeeds. Then crash the AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(task1Attempt1,TaskAttemptState.SUCCEEDED);
app.waitForState(mapTask1,TaskState.SUCCEEDED);
// Remember timestamps so recovery can be checked to preserve them.
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
// Second generation with recovery enabled.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Map1 (with its speculative history) is recovered as SUCCEEDED.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovery must preserve the original job/task timestamps.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
// One AMInfo per generation, with sequential attempt ids and our NM identity.
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Recorded AM start times must bracket our wall-clock estimates.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
InternalCallVerifier EqualityVerifier
/**
 * This test case primarily verifies if the recovery is controlled through config
 * property. In this case, recovery is turned OFF. AM with 3 maps and 0 reduce.
 * AM crashes after the first two tasks finishes and recovery fails and have
 * to rerun fully in the second generation and succeeds.
 * @throws Exception
 */
@Test public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(3,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
// Custom committer; "want.am.recovery"=false makes it refuse recovery.
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean("want.am.recovery",false);
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task mapTask3=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Finish the first two maps, then crash the AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.stop();
// Second generation: committer still refuses recovery, so nothing is restored.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class);
conf.setBoolean("want.am.recovery",false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setInt(MRJobConfig.NUM_REDUCES,0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
mapTask3=it.next();
// All three maps come back RUNNING: the whole job reruns from scratch.
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Drive all three maps to completion in this generation.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask3,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * AM with 1 map and 2 reduces using the old (mapred, pre-new-api) committer.
 * The AM crashes after the map and the first reduce succeed; the second
 * generation recovers both -- including the map's shuffle port -- and only
 * the second reduce runs to completion before the output is validated.
 * @throws Exception
 */
@Test public void testRecoveryWithOldCommiter() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(1,2,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
// Old-style mapred API on both sides selects the old committer path.
conf.setBoolean("mapred.mapper.new-api",false);
conf.setBoolean("mapred.reducer.new-api",false);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
// Iteration order: map, reduce1 (reduce2 is fetched in the 2nd generation).
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task reduceTask1=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
// Complete the first reduce (writing its output), then crash the AM.
app.waitForState(reduceTask1,TaskState.RUNNING);
TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next();
writeOutput(reduce1Attempt1,conf);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
app.stop();
// Second generation with recovery enabled.
app=new MRAppWithHistory(1,2,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",false);
conf.setBoolean("mapred.reducer.new-api",false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
reduceTask1=it.next();
Task reduceTask2=it.next();
// Map and first reduce are restored as SUCCEEDED without rerunning.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
task1Attempt1=mapTask1.getAttempts().values().iterator().next();
Assert.assertEquals(5467,task1Attempt1.getShufflePort());
app.waitForState(reduceTask1,TaskState.SUCCEEDED);
// Only the second reduce actually runs in this generation.
app.waitForState(reduceTask2,TaskState.RUNNING);
TaskAttempt reduce2Attempt=reduceTask2.getAttempts().values().iterator().next();
app.waitForState(reduce2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(reduceTask2,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
validateOutput();
}
InternalCallVerifier EqualityVerifier
/**
 * AM with 3 maps and 0 reduce. AM crashes after the first two tasks finishes
 * and recovers completely and succeeds in the second generation.
 * @throws Exception
 */
@Test public void testCrashOfMapsOnlyJob() throws Exception {
int runCount=0;
MRApp app=new MRAppWithHistory(3,0,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task mapTask3=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
app.waitForState(mapTask3,TaskState.RUNNING);
TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
TaskAttempt task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task1Attempt,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Finish the first two maps, then crash the AM.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.stop();
// Second generation with recovery enabled; NUM_REDUCES pinned to 0.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setInt(MRJobConfig.NUM_REDUCES,0);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
mapTask3=it.next();
// The two finished maps must be restored as SUCCEEDED without rerunning.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(mapTask3,TaskState.RUNNING);
task3Attempt=mapTask3.getAttempts().values().iterator().next();
app.waitForState(task3Attempt,TaskAttemptState.RUNNING);
// Only the third map runs in this generation.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask3,TaskState.SUCCEEDED);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * AM with 2 maps and 1 reduce, crashed twice: once after the first map
 * succeeds and again after the second map succeeds. Each new generation
 * recovers all previously completed tasks; the third generation finishes
 * the reduce and the job succeeds.
 * @throws Exception
 */
@Test public void testMultipleCrashes() throws Exception {
int runCount=0;
// Generation 1.
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Finish only map1, then crash the AM for the first time.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.stop();
// Generation 2: map1 recovered; finish map2, then crash again.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.stop();
// Generation 3: both maps recovered; only the reduce remains.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt
 * completely disappears because of failed launch, one attempt gets killed and
 * one attempt succeeds. AM crashes after the first tasks finishes and
 * recovers completely and succeeds in the second generation.
 * @throws Exception
 */
@Test public void testCrashed() throws Exception {
int runCount=0;
// Lower bound for the first AM's start time.
long am1StartTimeEst=System.currentTimeMillis();
MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount);
Configuration conf=new Configuration();
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
long jobStartTime=job.getReport().getStartTime();
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
Iterator it=job.getTasks().values().iterator();
Task mapTask1=it.next();
Task mapTask2=it.next();
Task reduceTask=it.next();
app.waitForState(mapTask1,TaskState.RUNNING);
app.waitForState(mapTask2,TaskState.RUNNING);
TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next();
TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task1Attempt1,TaskAttemptState.RUNNING);
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState());
// Attempt 1: plain failure via TA_FAILMSG.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_FAILMSG));
app.waitForState(task1Attempt1,TaskAttemptState.FAILED);
// Poll (up to ~20s) until the replacement attempt appears.
int timeOut=0;
while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(2,mapTask1.getAttempts().size());
Iterator itr=mapTask1.getAttempts().values().iterator();
itr.next();
TaskAttempt task1Attempt2=itr.next();
// Attempt 2: container launch failure.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt2.getID(),TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
app.waitForState(task1Attempt2,TaskAttemptState.FAILED);
timeOut=0;
while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(3,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
TaskAttempt task1Attempt3=itr.next();
app.waitForState(task1Attempt3,TaskAttemptState.RUNNING);
// Attempt 3: killed.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt3.getID(),TaskAttemptEventType.TA_KILL));
app.waitForState(task1Attempt3,TaskAttemptState.KILLED);
timeOut=0;
while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
Thread.sleep(2000);
LOG.info("Waiting for next attempt to start");
}
Assert.assertEquals(4,mapTask1.getAttempts().size());
itr=mapTask1.getAttempts().values().iterator();
itr.next();
itr.next();
itr.next();
TaskAttempt task1Attempt4=itr.next();
app.waitForState(task1Attempt4,TaskAttemptState.RUNNING);
// Attempt 4: finally succeeds, completing map1.
app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt4.getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask1,TaskState.SUCCEEDED);
// Remember timestamps so recovery can be checked to preserve them.
long task1StartTime=mapTask1.getReport().getStartTime();
long task1FinishTime=mapTask1.getReport().getFinishTime();
app.stop();
long am2StartTimeEst=System.currentTimeMillis();
// Second generation with recovery enabled.
app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount);
conf=new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true);
conf.setBoolean("mapred.mapper.new-api",true);
conf.setBoolean("mapred.reducer.new-api",true);
conf.set(FileOutputFormat.OUTDIR,outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Assert.assertEquals("No of tasks not correct",3,job.getTasks().size());
it=job.getTasks().values().iterator();
mapTask1=it.next();
mapTask2=it.next();
reduceTask=it.next();
// Map1 (with its messy attempt history) is recovered as SUCCEEDED.
app.waitForState(mapTask1,TaskState.SUCCEEDED);
app.waitForState(mapTask2,TaskState.RUNNING);
task2Attempt=mapTask2.getAttempts().values().iterator().next();
app.waitForState(task2Attempt,TaskAttemptState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(mapTask2,TaskState.SUCCEEDED);
app.waitForState(reduceTask,TaskState.RUNNING);
app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE));
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Recovery must preserve the original job/task timestamps.
Assert.assertEquals("Job Start time not correct",jobStartTime,job.getReport().getStartTime());
Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime());
// One AMInfo per generation, with sequential attempt ids and our NM identity.
Assert.assertEquals(2,job.getAMInfos().size());
int attemptNum=1;
for ( AMInfo amInfo : job.getAMInfos()) {
Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId());
Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId());
Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost());
Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort());
Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort());
}
// Recorded AM start times must bracket our wall-clock estimates.
long am1StartTimeReal=job.getAMInfos().get(0).getStartTime();
long am2StartTimeReal=job.getAMInfos().get(1).getStartTime();
Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst);
Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the staging directory IS deleted on normal job shutdown when the
 * job reached RUNNING and this attempt is the last AM retry
 * (attempt id == DEFAULT_MR_AM_MAX_ATTEMPTS via TestMRApp).
 * Fix: removed unused locals {@code jobid}/{@code recordFactory.newRecordInstance}
 * (dead code; the sibling reboot test below never had them).
 */
@Test public void testDeletionofStaging() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR,stagingJobDir);
// Mocked FS so we can verify the delete call without touching disk.
fs=mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir=MRApps.getStagingAreaDir(conf,user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId=ApplicationId.newInstance(System.currentTimeMillis(),0);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerAllocator mockAlloc=mock(ContainerAllocator.class);
// Sanity guard: the last-retry logic below only makes sense with >1 allowed attempt.
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
MRAppMaster appMaster=new TestMRApp(attemptId,mockAlloc,JobStateInternal.RUNNING,MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
// Last retry + RUNNING => cleanup must run exactly once.
Assert.assertEquals(true,((TestMRApp)appMaster).getTestIsLastAMRetry());
verify(fs).delete(stagingJobPath,true);
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies shutdown ordering: MRAppTestCleanup numbers each stop as it
 * happens, and the ContainerAllocator must stop first (order 1) with the
 * staging-dir cleanup second (order 2).
 */
@Test(timeout=20000) public void testStagingCleanupOrder() throws Exception {
MRAppTestCleanup app=new MRAppTestCleanup(1,1,true,this.getClass().getName(),true);
JobImpl job=(JobImpl)app.submit(new Configuration());
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Poll up to 20s for both shutdown steps to be recorded (numStops reaches 2).
int waitTime=20 * 1000;
while (waitTime > 0 && app.numStops < 2) {
Thread.sleep(100);
waitTime-=100;
}
// Order counters assigned by MRAppTestCleanup: 1 = stopped first, 2 = second.
Assert.assertEquals(1,app.ContainerAllocatorStopped);
Assert.assertEquals(2,app.stagingDirCleanedup);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the staging directory is NOT deleted when the AM shuts down in
 * REBOOT state on a non-final retry: the next attempt still needs the
 * staging data, so delete must never be invoked on the mocked FileSystem.
 */
@Test(timeout=30000) public void testNoDeletionofStagingOnReboot() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR,stagingJobDir);
// Mocked FS so the (absence of a) delete call can be verified.
fs=mock(FileSystem.class);
when(fs.delete(any(Path.class),anyBoolean())).thenReturn(true);
String user=UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingDir=MRApps.getStagingAreaDir(conf,user);
when(fs.exists(stagingDir)).thenReturn(true);
ApplicationId appId=ApplicationId.newInstance(System.currentTimeMillis(),0);
// Attempt 1 of DEFAULT_MR_AM_MAX_ATTEMPTS (>1) => not the last retry.
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerAllocator mockAlloc=mock(ContainerAllocator.class);
Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
MRAppMaster appMaster=new TestMRApp(attemptId,mockAlloc,JobStateInternal.REBOOT,MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf);
appMaster.start();
appMaster.shutDownJob();
// Not the last retry on a reboot => staging must survive for the next AM.
Assert.assertEquals(false,((TestMRApp)appMaster).getTestIsLastAMRetry());
verify(fs,times(0)).delete(stagingJobPath,true);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Happy-path commit through CommitterEventHandler: a JobCommitEvent must
 * result in a JobCommitCompletedEvent, exactly one committer.commitJob call,
 * and the start/end-success marker files on disk (no end-failure marker).
 */
@Test public void testBasic() throws Exception {
AppContext mockContext=mock(AppContext.class);
OutputCommitter mockCommitter=mock(OutputCommitter.class);
Clock mockClock=mock(Clock.class);
CommitterEventHandler handler=new CommitterEventHandler(mockContext,mockCommitter,new TestingRMHeartbeatHandler());
YarnConfiguration conf=new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
JobContext mockJobContext=mock(JobContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
// Captures the event the handler emits so the test can inspect it.
WaitForItHandler waitForItHandler=new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId,mockJobContext));
String user=UserGroupInformation.getCurrentUser().getShortUserName();
// Marker files the commit protocol writes under the staging dir.
Path startCommitFile=MRApps.getStartJobCommitFile(conf,user,jobId);
Path endCommitSuccessFile=MRApps.getEndJobCommitSuccessFile(conf,user,jobId);
Path endCommitFailureFile=MRApps.getEndJobCommitFailureFile(conf,user,jobId);
// Blocks until the handler emits its completion event.
Event e=waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitCompletedEvent);
FileSystem fs=FileSystem.get(conf);
// Successful commit: start + end-success markers exist, end-failure does not.
assertTrue(startCommitFile.toString(),fs.exists(startCommitFile));
assertTrue(endCommitSuccessFile.toString(),fs.exists(endCommitSuccessFile));
assertFalse(endCommitFailureFile.toString(),fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
}
finally {
// Always stop the handler thread, even on assertion failure.
handler.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Failure-path commit through CommitterEventHandler: when commitJob throws,
 * a JobCommitFailedEvent must be emitted and the end-failure marker file
 * written (start marker present, end-success marker absent).
 */
@Test public void testFailure() throws Exception {
AppContext mockContext=mock(AppContext.class);
OutputCommitter mockCommitter=mock(OutputCommitter.class);
Clock mockClock=mock(Clock.class);
CommitterEventHandler handler=new CommitterEventHandler(mockContext,mockCommitter,new TestingRMHeartbeatHandler());
YarnConfiguration conf=new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
JobContext mockJobContext=mock(JobContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(attemptid.getApplicationId()));
// Captures the event the handler emits so the test can inspect it.
WaitForItHandler waitForItHandler=new WaitForItHandler();
when(mockContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(mockContext.getApplicationAttemptId()).thenReturn(attemptid);
when(mockContext.getEventHandler()).thenReturn(waitForItHandler);
when(mockContext.getClock()).thenReturn(mockClock);
// Force the commit itself to fail.
doThrow(new YarnRuntimeException("Intentional Failure")).when(mockCommitter).commitJob(any(JobContext.class));
handler.init(conf);
handler.start();
try {
handler.handle(new CommitterJobCommitEvent(jobId,mockJobContext));
String user=UserGroupInformation.getCurrentUser().getShortUserName();
// Marker files the commit protocol writes under the staging dir.
Path startCommitFile=MRApps.getStartJobCommitFile(conf,user,jobId);
Path endCommitSuccessFile=MRApps.getEndJobCommitSuccessFile(conf,user,jobId);
Path endCommitFailureFile=MRApps.getEndJobCommitFailureFile(conf,user,jobId);
Event e=waitForItHandler.getAndClearEvent();
assertNotNull(e);
assertTrue(e instanceof JobCommitFailedEvent);
FileSystem fs=FileSystem.get(conf);
// Failed commit: start + end-failure markers exist, end-success does not.
assertTrue(fs.exists(startCommitFile));
assertFalse(fs.exists(endCommitSuccessFile));
assertTrue(fs.exists(endCommitFailureFile));
verify(mockCommitter).commitJob(any(JobContext.class));
}
finally {
// Always stop the handler thread, even on assertion failure.
handler.stop();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies the commit window: the CommitterEventHandler must not commit
 * until a fresh RM heartbeat confirms the AM is still registered. First
 * phase: stale heartbeat => handler registers a callback and holds the
 * commit. Second phase: heartbeat time refreshed => commit proceeds. After
 * cleanup() a second commit goes straight through.
 * Fix: corrected "hearbeat" typo in an assertion message.
 */
@Test public void testCommitWindow() throws Exception {
Configuration conf=new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
// Counts JobCommitCompleted events routed through the dispatcher.
TestingJobEventHandler jeh=new TestingJobEventHandler();
dispatcher.register(JobEventType.class,jeh);
SystemClock clock=new SystemClock();
AppContext appContext=mock(AppContext.class);
ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0");
when(appContext.getApplicationID()).thenReturn(attemptid.getApplicationId());
when(appContext.getApplicationAttemptId()).thenReturn(attemptid);
when(appContext.getEventHandler()).thenReturn(dispatcher.getEventHandler());
when(appContext.getClock()).thenReturn(clock);
OutputCommitter committer=mock(OutputCommitter.class);
TestingRMHeartbeatHandler rmhh=new TestingRMHeartbeatHandler();
CommitterEventHandler ceh=new CommitterEventHandler(appContext,committer,rmhh);
ceh.init(conf);
ceh.start();
ceh.handle(new CommitterJobCommitEvent(null,null));
// Phase 1: heartbeat is stale, so the handler must only register a
// callback and must NOT commit yet.
long timeToWaitMs=5000;
while (rmhh.getNumCallbacks() != 1 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs-=10;
}
Assert.assertEquals("committer did not register a heartbeat callback",1,rmhh.getNumCallbacks());
verify(committer,never()).commitJob(any(JobContext.class));
Assert.assertEquals("committer should not have committed",0,jeh.numCommitCompletedEvents);
// Phase 2: refresh the heartbeat; the pending commit must now complete.
rmhh.setLastHeartbeatTime(clock.getTime());
timeToWaitMs=5000;
while (jeh.numCommitCompletedEvents != 1 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs-=10;
}
Assert.assertEquals("committer did not complete commit after RM heartbeat",1,jeh.numCommitCompletedEvents);
verify(committer,times(1)).commitJob(any(JobContext.class));
// After cleanup a subsequent commit should not be gated again.
cleanup();
ceh.handle(new CommitterJobCommitEvent(null,null));
timeToWaitMs=5000;
while (jeh.numCommitCompletedEvents != 2 && timeToWaitMs > 0) {
Thread.sleep(10);
timeToWaitMs-=10;
}
Assert.assertEquals("committer did not commit",2,jeh.numCommitCompletedEvents);
verify(committer,times(2)).commitJob(any(JobContext.class));
ceh.stop();
dispatcher.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Drives a job into COMMITTING (committer blocked on a barrier), then sends
 * JOB_AM_REBOOT on the last AM retry. Internal state must become REBOOT; the
 * externally reported state stays RUNNING until unregistration succeeds,
 * after which it reads ERROR.
 */
@Test(timeout=20000) public void testRebootedDuringCommit() throws Exception {
Configuration conf=new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS,2);
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
// WaitingOutputCommitter parks commitJob on this barrier so the job is
// held in COMMITTING while the reboot event is delivered.
CyclicBarrier syncBarrier=new CyclicBarrier(2);
OutputCommitter committer=new WaitingOutputCommitter(syncBarrier,true);
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext=mock(AppContext.class);
when(mockContext.isLastAMRetry()).thenReturn(true);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job=createRunningStubbedJob(conf,dispatcher,2,mockContext);
completeJobTasks(job);
assertJobState(job,JobStateInternal.COMMITTING);
// Release the committer thread, then reboot mid-commit.
syncBarrier.await();
job.handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT));
assertJobState(job,JobStateInternal.REBOOT);
// While unregistration is pending, clients must still see RUNNING.
Assert.assertEquals(JobState.RUNNING,job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
// Last retry + reboot => reported as ERROR once unregistered.
Assert.assertEquals(JobState.ERROR,job.getState());
dispatcher.stop();
commitHandler.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Sends JOB_AM_REBOOT while the job is stuck in SETUP (the stubbed
 * committer's setupJob blocks until interrupted). On a non-final retry the
 * internal state becomes REBOOT but the external state must stay RUNNING.
 */
@Test(timeout=20000) public void testRebootedDuringSetup() throws Exception {
Configuration conf=new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
// Committer whose setupJob blocks until its thread is interrupted,
// pinning the job in the SETUP state.
OutputCommitter committer=new StubbedOutputCommitter(){
@Override public synchronized void setupJob( JobContext jobContext) throws IOException {
while (!Thread.interrupted()) {
try {
wait();
}
catch ( InterruptedException e) {
}
}
}
}
;
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext=mock(AppContext.class);
// Not the last retry: another AM attempt will follow the reboot.
when(mockContext.isLastAMRetry()).thenReturn(false);
JobImpl job=createStubbedJob(conf,dispatcher,2,mockContext);
JobId jobId=job.getID();
job.handle(new JobEvent(jobId,JobEventType.JOB_INIT));
assertJobState(job,JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job,JobStateInternal.SETUP);
job.handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT));
assertJobState(job,JobStateInternal.REBOOT);
// Non-final retry: clients must keep seeing RUNNING during the reboot.
Assert.assertEquals(JobState.RUNNING,job.getState());
dispatcher.stop();
commitHandler.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * When InitTransition.createSplits throws (here a forced
 * YarnRuntimeException), transition() must return JobStateInternal.NEW and
 * record the exception in the job diagnostics.
 */
@Test public void testMetaInfoSizeOverMax() throws Exception {
Configuration conf=new Configuration();
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
MRAppMetrics mrAppMetrics=MRAppMetrics.create();
JobImpl job=new JobImpl(jobId,ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,0),0),conf,mock(EventHandler.class),null,new JobTokenSecretManager(),new Credentials(),null,null,mrAppMetrics,null,true,null,0,null,null,null,null);
// InitTransition whose split creation always fails with EXCEPTIONMSG.
InitTransition initTransition=new InitTransition(){
@Override protected TaskSplitMetaInfo[] createSplits( JobImpl job, JobId jobId){
throw new YarnRuntimeException(EXCEPTIONMSG);
}
}
;
JobEvent mockJobEvent=mock(JobEvent.class);
JobStateInternal jobSI=initTransition.transition(job,mockJobEvent);
// Failed init must leave the job in NEW, not move it forward.
Assert.assertTrue("When init fails, return value from InitTransition.transition should equal NEW.",jobSI.equals(JobStateInternal.NEW));
// Diagnostics must carry both the exception type and its message.
Assert.assertTrue("Job diagnostics should contain YarnRuntimeException",job.getDiagnostics().toString().contains("YarnRuntimeException"));
Assert.assertTrue("Job diagnostics should contain " + EXCEPTIONMSG,job.getDiagnostics().toString().contains(EXCEPTIONMSG));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Forces the job into FAILED (setupJob throws), then replays a series of
 * late-arriving task/attempt events; each must be absorbed with the job
 * staying FAILED. The external state reads RUNNING until unregistration
 * succeeds, then FAILED.
 */
@Test public void testTransitionsAtFailed() throws IOException {
Configuration conf=new Configuration();
AsyncDispatcher dispatcher=new AsyncDispatcher();
dispatcher.init(conf);
dispatcher.start();
OutputCommitter committer=mock(OutputCommitter.class);
// Make job setup fail so the start event drives the job to FAILED.
doThrow(new IOException("forcefail")).when(committer).setupJob(any(JobContext.class));
CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
commitHandler.init(conf);
commitHandler.start();
AppContext mockContext=mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(false);
JobImpl job=createStubbedJob(conf,dispatcher,2,mockContext);
JobId jobId=job.getID();
job.handle(new JobEvent(jobId,JobEventType.JOB_INIT));
assertJobState(job,JobStateInternal.INITED);
job.handle(new JobStartEvent(jobId));
assertJobState(job,JobStateInternal.FAILED);
// Stale events arriving after failure must not change the state.
job.handle(new JobEvent(jobId,JobEventType.JOB_TASK_COMPLETED));
assertJobState(job,JobStateInternal.FAILED);
job.handle(new JobEvent(jobId,JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
assertJobState(job,JobStateInternal.FAILED);
job.handle(new JobEvent(jobId,JobEventType.JOB_MAP_TASK_RESCHEDULED));
assertJobState(job,JobStateInternal.FAILED);
job.handle(new JobEvent(jobId,JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
assertJobState(job,JobStateInternal.FAILED);
// External view stays RUNNING until the AM has unregistered from the RM.
Assert.assertEquals(JobState.RUNNING,job.getState());
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
Assert.assertEquals(JobState.FAILED,job.getState());
dispatcher.stop();
commitHandler.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises the uber-job decision under several configurations: disabled by
 * default, enabled explicitly, and enabled but vetoed by reducer count,
 * mapper count, or resource limits. Each case uses a fresh Configuration
 * fed through the testUberDecision(Configuration) helper.
 */
@Test public void testUberDecision() throws Exception {
// Default configuration: uber mode is off.
Configuration config=new Configuration();
Assert.assertFalse(testUberDecision(config));
// Enabled explicitly: the stubbed job qualifies.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
Assert.assertTrue(testUberDecision(config));
// Enabled, but one reducer exceeds a max-reduces limit of zero.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES,0);
config.setInt(MRJobConfig.NUM_REDUCES,1);
Assert.assertFalse(testUberDecision(config));
// Enabled with one reducer allowed and one configured: qualifies.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES,1);
config.setInt(MRJobConfig.NUM_REDUCES,1);
Assert.assertTrue(testUberDecision(config));
// Enabled, but the map count exceeds a max-maps limit of one.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.JOB_UBERTASK_MAXMAPS,1);
Assert.assertFalse(testUberDecision(config));
// Enabled with zero reducers: reduce resource settings are irrelevant.
config=new Configuration();
config.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,true);
config.setInt(MRJobConfig.NUM_REDUCES,0);
config.setInt(MRJobConfig.REDUCE_MEMORY_MB,2048);
config.setInt(MRJobConfig.REDUCE_CPU_VCORES,10);
Assert.assertTrue(testUberDecision(config));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies that a JobDiagnosticsUpdateEvent lands in the job report's
 * diagnostics both for a job in its initial state and for a job that has
 * already been sent JOB_KILL.
 */
@Test public void testReportDiagnostics() throws Exception {
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
final String diagMsg="some diagnostic message";
final JobDiagnosticsUpdateEvent diagUpdateEvent=new JobDiagnosticsUpdateEvent(jobId,diagMsg);
MRAppMetrics mrAppMetrics=MRAppMetrics.create();
AppContext mockContext=mock(AppContext.class);
when(mockContext.hasSuccessfullyUnregistered()).thenReturn(true);
// Case 1: diagnostics update on a freshly constructed job.
JobImpl job=new JobImpl(jobId,Records.newRecord(ApplicationAttemptId.class),new Configuration(),mock(EventHandler.class),null,mock(JobTokenSecretManager.class),null,new SystemClock(),null,mrAppMetrics,null,true,null,0,null,mockContext,null,null);
job.handle(diagUpdateEvent);
String diagnostics=job.getReport().getDiagnostics();
Assert.assertNotNull(diagnostics);
Assert.assertTrue(diagnostics.contains(diagMsg));
// Case 2: diagnostics update delivered after the job was killed.
job=new JobImpl(jobId,Records.newRecord(ApplicationAttemptId.class),new Configuration(),mock(EventHandler.class),null,mock(JobTokenSecretManager.class),null,new SystemClock(),null,mrAppMetrics,null,true,null,0,null,mockContext,null,null);
job.handle(new JobEvent(jobId,JobEventType.JOB_KILL));
job.handle(diagUpdateEvent);
diagnostics=job.getReport().getDiagnostics();
Assert.assertNotNull(diagnostics);
Assert.assertTrue(diagnostics.contains(diagMsg));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Exercises JobImpl.checkAccess for VIEW_JOB across five ACL setups: empty
 * ACL (owner only), an ACL naming a second user, a wildcard ACL, ACLs
 * disabled entirely, and a null operation (always allowed).
 */
@Test public void testCheckAccess(){
// Two principals: the job owner and an unrelated user.
String owner=System.getProperty("user.name");
String stranger=owner + "1234";
UserGroupInformation ownerUgi=UserGroupInformation.createRemoteUser(owner);
UserGroupInformation strangerUgi=UserGroupInformation.createRemoteUser(stranger);
JobID jobID=JobID.forName("job_1234567890000_0001");
JobId jobId=TypeConverter.toYarn(jobID);
// Case 1: ACLs on, empty view ACL -> only the owner may view.
Configuration emptyAclConf=new Configuration();
emptyAclConf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
emptyAclConf.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl ownerOnlyJob=new JobImpl(jobId,null,emptyAclConf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(ownerOnlyJob.checkAccess(ownerUgi,JobACL.VIEW_JOB));
Assert.assertFalse(ownerOnlyJob.checkAccess(strangerUgi,JobACL.VIEW_JOB));
// Case 2: ACLs on, view ACL names the second user -> both may view.
Configuration namedAclConf=new Configuration();
namedAclConf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
namedAclConf.set(MRJobConfig.JOB_ACL_VIEW_JOB,stranger);
JobImpl namedAclJob=new JobImpl(jobId,null,namedAclConf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(namedAclJob.checkAccess(ownerUgi,JobACL.VIEW_JOB));
Assert.assertTrue(namedAclJob.checkAccess(strangerUgi,JobACL.VIEW_JOB));
// Case 3: ACLs on, wildcard view ACL -> everyone may view.
Configuration wildcardConf=new Configuration();
wildcardConf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
wildcardConf.set(MRJobConfig.JOB_ACL_VIEW_JOB,"*");
JobImpl wildcardJob=new JobImpl(jobId,null,wildcardConf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(wildcardJob.checkAccess(ownerUgi,JobACL.VIEW_JOB));
Assert.assertTrue(wildcardJob.checkAccess(strangerUgi,JobACL.VIEW_JOB));
// Case 4: ACLs disabled -> access is never restricted.
Configuration aclsOffConf=new Configuration();
aclsOffConf.setBoolean(MRConfig.MR_ACLS_ENABLED,false);
aclsOffConf.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl aclsOffJob=new JobImpl(jobId,null,aclsOffConf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(aclsOffJob.checkAccess(ownerUgi,JobACL.VIEW_JOB));
Assert.assertTrue(aclsOffJob.checkAccess(strangerUgi,JobACL.VIEW_JOB));
// Case 5: null operation -> allowed for anyone even with ACLs on.
Configuration nullOpConf=new Configuration();
nullOpConf.setBoolean(MRConfig.MR_ACLS_ENABLED,true);
nullOpConf.set(MRJobConfig.JOB_ACL_VIEW_JOB,"");
JobImpl nullOpJob=new JobImpl(jobId,null,nullOpConf,null,null,null,null,null,null,null,null,true,owner,0,null,null,null,null);
Assert.assertTrue(nullOpJob.checkAccess(ownerUgi,null));
Assert.assertTrue(nullOpJob.checkAccess(strangerUgi,null));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a one-map job through MyMRApp and checks the exact container launch
 * command line built for the task, plus the default HADOOP_ROOT_LOGGER and
 * HADOOP_CLIENT_OPTS entries in the task environment.
 */
@Test(timeout=30000) public void testCommandLine() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
// Cross-platform submission so env vars appear as portable placeholders.
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Full expected launch command, including JVM opts, log4j settings and
// the YarnChild arguments; compared verbatim.
Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java"+ " -Djava.net.preferIPv4Stack=true"+ " -Dhadoop.metrics.log.level=WARN"+ " -Xmx200m -Djava.io.tmpdir="+ MRApps.crossPlatformify("PWD")+ "/tmp"+ " -Dlog4j.configuration=container-log4j.properties"+ " -Dyarn.app.container.log.dir="+ " -Dyarn.app.container.log.filesize=0"+ " -Dhadoop.root.logger=INFO,CLA"+ " org.apache.hadoop.mapred.YarnChild 127.0.0.1"+ " 54321"+ " attempt_0_0000_m_000000_0"+ " 0"+ " 1>/stdout"+ " 2>/stderr ]",app.myCommandLine);
// Defaults when the job does not override the task environment.
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("INFO,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies user-supplied task environment settings take effect: first run
 * overrides HADOOP_CLIENT_OPTS and the map log level, second run overrides
 * HADOOP_ROOT_LOGGER directly via the task env.
 */
@Test public void testEnvironmentVariables() throws Exception {
MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
Configuration conf=new Configuration();
// Run 1: user sets HADOOP_CLIENT_OPTS and a WARN map log level.
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_CLIENT_OPTS=test");
conf.setStrings(MRJobConfig.MAP_LOG_LEVEL,"WARN");
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,false);
Job job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// Log level from MAP_LOG_LEVEL feeds HADOOP_ROOT_LOGGER; client opts
// come from the explicit env override.
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("WARN,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
Assert.assertEquals("test",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
// Run 2: user overrides HADOOP_ROOT_LOGGER directly in the task env.
app=new MyMRApp(1,0,true,this.getClass().getName(),true);
conf=new Configuration();
conf.set(JobConf.MAPRED_MAP_TASK_ENV,"HADOOP_ROOT_LOGGER=trace");
job=app.submit(conf);
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
// The explicit env value wins verbatim (no ",console" suffix appended).
Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
Assert.assertEquals("trace",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Verifies that shuffle-provider aux services named in
 * MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES end up in the container launch
 * context's service-data map.
 * Fix: replaced {@code assertTrue(map.size() == 3)} with
 * {@code assertEquals} so a mismatch reports the actual size.
 */
@Test public void testShuffleProviders() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,1);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
Path jobFile=mock(Path.class);
EventHandler eventHandler=mock(EventHandler.class);
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
// Register both test shuffle handlers as NM aux services...
jobConf.set(YarnConfiguration.NM_AUX_SERVICES,TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
String serviceName=TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
String serviceStr=String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,serviceName);
jobConf.set(serviceStr,TestShuffleHandler1.class.getName());
serviceName=TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID;
serviceStr=String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,serviceName);
jobConf.set(serviceStr,TestShuffleHandler2.class.getName());
// ...and request both as shuffle providers for this job.
jobConf.set(MRJobConfig.MAPREDUCE_JOB_SHUFFLE_PROVIDER_SERVICES,TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID + "," + TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID);
Credentials credentials=new Credentials();
Token jobToken=new Token(("tokenid").getBytes(),("tokenpw").getBytes(),new Text("tokenkind"),new Text("tokenservice"));
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,mock(TaskSplitMetaInfo.class),jobConf,taListener,jobToken,credentials,new SystemClock(),null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,taImpl.getID().toString());
ContainerLaunchContext launchCtx=TaskAttemptImpl.createContainerLaunchContext(null,jobConf,jobToken,taImpl.createRemoteTask(),TypeConverter.fromYarn(jobId),mock(WrappedJvmID.class),taListener,credentials);
Map serviceDataMap=launchCtx.getServiceData();
Assert.assertNotNull("TestShuffleHandler1 is missing",serviceDataMap.get(TestShuffleHandler1.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
Assert.assertNotNull("TestShuffleHandler2 is missing",serviceDataMap.get(TestShuffleHandler2.MAPREDUCE_TEST_SHUFFLE_SERVICEID));
// Expected 3 entries: the two test providers plus one more — presumably
// the built-in shuffle service; confirm against createContainerLaunchContext.
Assert.assertEquals("mismatch number of services in map",3,serviceDataMap.size());
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies RequestContainerTransition resolves three hosts to a single rack
 * in the emitted ContainerRequestEvent (second of two captured events).
 * Fixes: lowercase long suffix {@code 1024l} (easily misread as 1) changed
 * to {@code 1024L}; raw {@code ArgumentCaptor} parameterized.
 */
@Test public void testSingleRackRequest() throws Exception {
TaskAttemptImpl.RequestContainerTransition rct=new TaskAttemptImpl.RequestContainerTransition(false);
EventHandler eventHandler=mock(EventHandler.class);
// Three distinct hosts; the transition should collapse them to one rack.
String[] hosts=new String[3];
hosts[0]="host1";
hosts[1]="host2";
hosts[2]="host3";
TaskSplitMetaInfo splitInfo=new TaskSplitMetaInfo(hosts,0,128 * 1024 * 1024L);
TaskAttemptImpl mockTaskAttempt=createMapTaskAttemptImplForTest(eventHandler,splitInfo);
TaskAttemptEvent mockTAEvent=mock(TaskAttemptEvent.class);
rct.transition(mockTaskAttempt,mockTAEvent);
// Capture both events the transition emits; the container request is second.
ArgumentCaptor<Event> arg=ArgumentCaptor.forClass(Event.class);
verify(eventHandler,times(2)).handle(arg.capture());
if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
Assert.fail("Second Event not of type ContainerRequestEvent");
}
ContainerRequestEvent cre=(ContainerRequestEvent)arg.getAllValues().get(1);
String[] requestedRacks=cre.getRacks();
// All three hosts map to the same (default) rack.
assertEquals(1,requestedRacks.length);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that when a SUCCEEDED map attempt is failed via
 * TA_TOO_MANY_FETCH_FAILURE, its recorded finish time is preserved rather
 * than overwritten.
 * Fix: JUnit assertEquals arguments were passed actual-first; swapped to
 * the documented (message, expected, actual) order so failure messages
 * report expected/actual correctly.
 */
@Test public void testFetchFailureAttemptFinishTime() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,mock(Token.class),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
// Drive the attempt through schedule -> assign -> launch -> done -> cleaned.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertEquals("Task attempt is not in succeeded state",TaskAttemptState.SUCCEEDED,taImpl.getState());
assertTrue("Task Attempt finish time is not greater than 0",taImpl.getFinishTime() > 0);
Long finishTime=taImpl.getFinishTime();
// Sleep so a (wrongly) re-stamped finish time would differ measurably.
Thread.sleep(5);
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in Too Many Fetch Failure state",TaskAttemptState.FAILED,taImpl.getState());
// Finish time must be unchanged by the fetch-failure transition.
assertEquals("After TA_TOO_MANY_FETCH_FAILURE," + " Task attempt finish time is not the same ",finishTime,Long.valueOf(taImpl.getFinishTime()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies a second TA_TOO_MANY_FETCH_FAILURE delivered to an already
 * FAILED attempt is absorbed without triggering an internal error.
 * Fix: JUnit assertEquals arguments were passed actual-first; swapped to
 * the documented (message, expected, actual) order.
 */
@Test public void testDoubleTooManyFetchFailure() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
// NOTE(review): 'resource' is stubbed but never passed anywhere visible
// in this method; kept for parity with the original test.
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
// Drive the attempt through schedule -> assign -> launch -> done -> cleaned.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertEquals("Task attempt is not in succeeded state",TaskAttemptState.SUCCEEDED,taImpl.getState());
// First fetch-failure report fails the attempt.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in FAILED state",TaskAttemptState.FAILED,taImpl.getState());
// Duplicate report must be a no-op, not an InternalError.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in FAILED state, still",TaskAttemptState.FAILED,taImpl.getState());
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Kills an attempt between container assignment and launch, then delivers
 * cleanup and launch-failed events; the state machine must absorb them
 * without an internal error, and locality must record NODE_LOCAL from the
 * matching 127.0.0.1 split/node.
 */
@Test public void testLaunchFailedWhileKilling() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
// Records whether the state machine hit an InternalError.
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
// Split located on the same host as the assigned node => NODE_LOCAL.
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),null);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
// Kill before launch, then deliver the late cleanup/launch-failed events.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
assertFalse(eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local node",Locality.NODE_LOCAL,taImpl.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that an attempt that succeeded and was then killed stays KILLED
 * even when a TA_TOO_MANY_FETCH_FAILURE event arrives afterwards, and that
 * no internal error is raised while handling the sequence.
 */
@Test public void testTooManyFetchFailureAfterKill() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,mock(Token.class),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
// Drive the attempt through a normal successful lifecycle.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_DONE));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
// assertEquals is (message, expected, actual): expected value comes first.
assertEquals("Task attempt is not in succeeded state",TaskAttemptState.SUCCEEDED,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL));
assertEquals("Task attempt is not in KILLED state",TaskAttemptState.KILLED,taImpl.getState());
// A late fetch-failure report must not move the attempt out of KILLED.
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
assertEquals("Task attempt is not in KILLED state, still",TaskAttemptState.KILLED,taImpl.getState());
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that TA_CONTAINER_CLEANED arriving while the attempt is in
 * COMMIT_PENDING does not raise an internal error. With no split locations,
 * locality is expected to be OFF_SWITCH.
 */
@Test public void testContainerCleanedWhileCommitting() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
// Empty split locations: the attempt has no data-local host.
when(splits.getLocations()).thenReturn(new String[]{});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.1",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_COMMIT_PENDING));
// assertEquals is (message, expected, actual): expected value comes first.
assertEquals("Task attempt is not in commit pending state",TaskAttemptState.COMMIT_PENDING,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError);
assertEquals("Task attempt is assigned locally",Locality.OFF_SWITCH,taImpl.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that TA_KILL received while the attempt is in COMMIT_PENDING
 * moves it to KILL_CONTAINER_CLEANUP without raising an internal error.
 */
@Test public void testContainerKillWhileCommitPending() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
// Container host (127.0.0.2) differs from the split location (127.0.0.1).
NodeId nid=NodeId.newInstance("127.0.0.2",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
// assertEquals is (message, expected, actual): expected value comes first.
assertEquals("Task attempt is not in running state",TaskAttemptState.RUNNING,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_COMMIT_PENDING));
assertEquals("Task should be in COMMIT_PENDING state",TaskAttemptStateInternal.COMMIT_PENDING,taImpl.getInternalState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL));
assertFalse("InternalError occurred trying to handle TA_KILL",eventHandler.internalError);
assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that TA_KILL received while the attempt is merely ASSIGNED (the
 * container has not launched) moves it to KILL_CONTAINER_CLEANUP.
 */
@Test public void testContainerKillAfterAssigned() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.2",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
// assertEquals is (message, expected, actual): expected value comes first.
assertEquals("Task attempt is not in assigned state",TaskAttemptStateInternal.ASSIGNED,taImpl.getInternalState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL));
assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that TA_CONTAINER_CLEANED received while the attempt is RUNNING
 * does not raise an internal error. The container runs on a different host
 * (127.0.0.2) than the split location (127.0.0.1), so locality is RACK_LOCAL.
 */
@Test public void testContainerCleanedWhileRunning() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.2",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
// assertEquals is (message, expected, actual): expected value comes first.
assertEquals("Task attempt is not in running state",TaskAttemptState.RUNNING,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED));
assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError);
assertEquals("Task attempt is not assigned on the local rack",Locality.RACK_LOCAL,taImpl.getLocality());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that TA_KILL received while the attempt is RUNNING moves it to
 * KILL_CONTAINER_CLEANUP without raising an internal error.
 */
@Test public void testContainerKillWhileRunning() throws Exception {
ApplicationId appId=ApplicationId.newInstance(1,2);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
Path jobFile=mock(Path.class);
MockEventHandler eventHandler=new MockEventHandler();
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10");
TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class);
when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"});
AppContext appCtx=mock(AppContext.class);
ClusterInfo clusterInfo=mock(ClusterInfo.class);
Resource resource=mock(Resource.class);
when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
when(resource.getMemory()).thenReturn(1024);
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx);
NodeId nid=NodeId.newInstance("127.0.0.2",0);
ContainerId contId=ContainerId.newInstance(appAttemptId,3);
Container container=mock(Container.class);
when(container.getId()).thenReturn(contId);
when(container.getNodeId()).thenReturn(nid);
when(container.getNodeHttpAddress()).thenReturn("localhost:0");
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE));
taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class)));
taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0));
// assertEquals is (message, expected, actual): expected value comes first.
assertEquals("Task attempt is not in running state",TaskAttemptState.RUNNING,taImpl.getState());
taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL));
assertFalse("InternalError occurred trying to handle TA_KILL",eventHandler.internalError);
assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAttemptContainerRequest() throws Exception {
final Text SECRET_KEY_ALIAS=new Text("secretkeyalias");
final byte[] SECRET_KEY=("secretkey").getBytes();
Map acls=new HashMap(1);
acls.put(ApplicationAccessType.VIEW_APP,"otheruser");
ApplicationId appId=ApplicationId.newInstance(1,1);
JobId jobId=MRBuilderUtils.newJobId(appId,1);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP);
Path jobFile=mock(Path.class);
EventHandler eventHandler=mock(EventHandler.class);
TaskAttemptListener taListener=mock(TaskAttemptListener.class);
when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0));
JobConf jobConf=new JobConf();
jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class);
jobConf.setBoolean("fs.file.impl.disable.cache",true);
jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,"");
jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(jobConf);
Credentials credentials=new Credentials();
credentials.addSecretKey(SECRET_KEY_ALIAS,SECRET_KEY);
Token jobToken=new Token(("tokenid").getBytes(),("tokenpw").getBytes(),new Text("tokenkind"),new Text("tokenservice"));
TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,mock(TaskSplitMetaInfo.class),jobConf,taListener,jobToken,credentials,new SystemClock(),null);
jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,taImpl.getID().toString());
ContainerLaunchContext launchCtx=TaskAttemptImpl.createContainerLaunchContext(acls,jobConf,jobToken,taImpl.createRemoteTask(),TypeConverter.fromYarn(jobId),mock(WrappedJvmID.class),taListener,credentials);
Assert.assertEquals("ACLs mismatch",acls,launchCtx.getApplicationACLs());
Credentials launchCredentials=new Credentials();
DataInputByteBuffer dibb=new DataInputByteBuffer();
dibb.reset(launchCtx.getTokens());
launchCredentials.readTokenStorageStream(dibb);
for ( Token extends TokenIdentifier> token : credentials.getAllTokens()) {
Token extends TokenIdentifier> launchToken=launchCredentials.getToken(token.getService());
Assert.assertNotNull("Token " + token.getService() + " is missing",launchToken);
Assert.assertEquals("Token " + token.getService() + " mismatch",token,launchToken);
}
Assert.assertNotNull("Secret key missing",launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
Assert.assertTrue("Secret key mismatch",Arrays.equals(SECRET_KEY,launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
InternalCallVerifier EqualityVerifier
// Verifies that when a speculative attempt finishes first, the task reports
// the counters of the successful speculative attempt rather than those of
// the original (base) attempt. The task is limited to one regular attempt.
@Test public void testCountersWithSpeculation(){
mockTask=new MockTaskImpl(jobId,partition,dispatcher.getEventHandler(),remoteJobConfFile,conf,taskAttemptListener,jobToken,credentials,clock,startCount,metrics,appContext,TaskType.MAP){
@Override protected int getMaxAttempts(){
return 1;
}
}
;
TaskId taskId=getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.RUNNING);
MockTaskAttemptImpl baseAttempt=getLastAttempt();
// Add a speculative attempt while the base attempt is still running.
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.RUNNING);
MockTaskAttemptImpl specAttempt=getLastAttempt();
assertEquals(2,taskAttempts.size());
// Give the speculative attempt a distinctive CPU counter value so the
// task-level counters reveal which attempt they came from.
Counters specAttemptCounters=new Counters();
Counter cpuCounter=specAttemptCounters.findCounter(TaskCounter.CPU_MILLISECONDS);
cpuCounter.setValue(1000);
specAttempt.setCounters(specAttemptCounters);
// The speculative attempt commits and succeeds before the base attempt.
commitTaskAttempt(specAttempt.getAttemptId());
specAttempt.setProgress(1.0f);
specAttempt.setState(TaskAttemptState.SUCCEEDED);
mockTask.handle(new TaskTAttemptEvent(specAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_SUCCEEDED));
assertEquals(TaskState.SUCCEEDED,mockTask.getState());
baseAttempt.setProgress(1.0f);
// Task counters must be those of the attempt that actually succeeded.
Counters taskCounters=mockTask.getCounters();
assertEquals("wrong counters for task",specAttemptCounters,taskCounters);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies commit arbitration when the first attempt fails while committing:
// a second attempt is spawned, and once it reaches COMMIT_PENDING and
// succeeds, only the second attempt is allowed to commit.
@Test public void testFailureDuringTaskAttemptCommit(){
mockTask=createMockTask(TaskType.MAP);
TaskId taskId=getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
commitTaskAttempt(getLastAttempt().getAttemptId());
// First attempt fails mid-commit; the task should create a new attempt.
updateLastAttemptState(TaskAttemptState.FAILED);
failRunningTaskAttempt(getLastAttempt().getAttemptId());
assertEquals(2,taskAttempts.size());
updateLastAttemptState(TaskAttemptState.SUCCEEDED);
commitTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ATTEMPT_SUCCEEDED));
// The failed first attempt must be denied commit; the second one granted.
assertFalse("First attempt should not commit",mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
assertTrue("Second attempt should commit",mockTask.canCommit(getLastAttempt().getAttemptId()));
assertTaskSucceededState();
}
InternalCallVerifier EqualityVerifier
// Drives a task (limited to one regular attempt) into FAILED and verifies
// that FAILED is a sticky terminal state: subsequent kill, speculation,
// launch, commit-pending, failed, succeeded and killed events must all
// leave the task in FAILED and must not create additional attempts.
@Test public void testFailedTransitions(){
mockTask=new MockTaskImpl(jobId,partition,dispatcher.getEventHandler(),remoteJobConfFile,conf,taskAttemptListener,jobToken,credentials,clock,startCount,metrics,appContext,TaskType.MAP){
@Override protected int getMaxAttempts(){
return 1;
}
}
;
TaskId taskId=getNewTaskID();
scheduleTaskAttempt(taskId);
launchTaskAttempt(getLastAttempt().getAttemptId());
// Spawn three speculative attempts so four attempts exist in total.
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT));
launchTaskAttempt(getLastAttempt().getAttemptId());
assertEquals(4,taskAttempts.size());
// Fail the first attempt: with maxAttempts == 1 the task goes FAILED.
MockTaskAttemptImpl taskAttempt=taskAttempts.get(0);
taskAttempt.setState(TaskAttemptState.FAILED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_FAILED));
assertEquals(TaskState.FAILED,mockTask.getState());
// A kill on an already-failed task must be a no-op.
mockTask.handle(new TaskEvent(taskId,TaskEventType.T_KILL));
assertEquals(TaskState.FAILED,mockTask.getState());
// New speculation/launch events must neither change state nor add attempts.
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT));
mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ATTEMPT_LAUNCHED));
assertEquals(TaskState.FAILED,mockTask.getState());
assertEquals(4,taskAttempts.size());
// Commit-pending then failure from a remaining attempt: still FAILED.
taskAttempt=taskAttempts.get(1);
taskAttempt.setState(TaskAttemptState.COMMIT_PENDING);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_COMMIT_PENDING));
assertEquals(TaskState.FAILED,mockTask.getState());
taskAttempt.setState(TaskAttemptState.FAILED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_FAILED));
assertEquals(TaskState.FAILED,mockTask.getState());
// Even a success from another attempt cannot resurrect a FAILED task.
taskAttempt=taskAttempts.get(2);
taskAttempt.setState(TaskAttemptState.SUCCEEDED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_SUCCEEDED));
assertEquals(TaskState.FAILED,mockTask.getState());
// Nor can a kill notification from the last attempt.
taskAttempt=taskAttempts.get(3);
taskAttempt.setState(TaskAttemptState.KILLED);
mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_KILLED));
assertEquals(TaskState.FAILED,mockTask.getState());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that the container launcher's thread pool grows with the number
// of distinct hosts but is capped by the configured
// MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT (12 here).
@Test(timeout=5000) public void testPoolLimits() throws InterruptedException {
ApplicationId appId=ApplicationId.newInstance(12345,67);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,3);
JobId jobId=MRBuilderUtils.newJobId(appId,8);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,9,TaskType.MAP);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,0);
ContainerId containerId=ContainerId.newInstance(appAttemptId,10);
AppContext context=mock(AppContext.class);
CustomContainerLauncher containerLauncher=new CustomContainerLauncher(context);
Configuration conf=new Configuration();
// Cap the launcher thread pool at 12 threads.
conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,12);
containerLauncher.init(conf);
containerLauncher.start();
ThreadPoolExecutor threadPool=containerLauncher.getThreadPool();
containerLauncher.expectedCorePoolSize=ContainerLauncherImpl.INITIAL_POOL_SIZE;
// 10 distinct hosts -> pool grows to 10 threads (below the limit).
for (int i=0; i < 10; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,10);
Assert.assertEquals(10,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// 4 more distinct hosts would want 14 threads, but the limit holds at 12.
containerLauncher.expectedCorePoolSize=12;
for (int i=1; i <= 4; i++) {
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host1" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,12);
Assert.assertEquals(12,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Let the remaining queued events drain; the pool must stay at 12.
containerLauncher.finishEventHandling=true;
waitForEvents(containerLauncher,14);
Assert.assertEquals(12,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that a NodeManager that never answers the container-launch RPC
// causes the launch to fail with a SocketTimeoutException (3s command
// timeout) and the job to fail, with the timeout recorded in the attempt's
// diagnostics.
@Test(timeout=15000) public void testSlowNM() throws Exception {
conf=new Configuration();
int maxAttempts=1;
conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false);
// 3 second NM command timeout so the launch fails quickly.
conf.setInt("yarn.rpc.nm-command-timeout",3000);
conf.set(YarnConfiguration.IPC_RPC_IMPL,HadoopYarnProtoRPC.class.getName());
YarnRPC rpc=YarnRPC.create(conf);
String bindAddr="localhost:0";
InetSocketAddress addr=NetUtils.createSocketAddr(bindAddr);
NMTokenSecretManagerInNM tokenSecretManager=new NMTokenSecretManagerInNM();
MasterKey masterKey=Records.newRecord(MasterKey.class);
masterKey.setBytes(ByteBuffer.wrap("key".getBytes()));
tokenSecretManager.setMasterKey(masterKey);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"token");
// Stand in for the NM with a dummy container manager.
server=rpc.getServer(ContainerManagementProtocol.class,new DummyContainerManager(),addr,conf,tokenSecretManager,1);
server.start();
MRApp app=new MRAppWithSlowNM(tokenSecretManager);
try {
Job job=app.submit(conf);
app.waitForState(job,JobState.RUNNING);
Map tasks=job.getTasks();
Assert.assertEquals("Num tasks is not correct",1,tasks.size());
Task task=tasks.values().iterator().next();
app.waitForState(task,TaskState.SCHEDULED);
Map attempts=tasks.values().iterator().next().getAttempts();
Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size());
TaskAttempt attempt=attempts.values().iterator().next();
// The attempt never gets past ASSIGNED because the launch RPC times out.
app.waitForInternalState((TaskAttemptImpl)attempt,TaskAttemptStateInternal.ASSIGNED);
app.waitForState(job,JobState.FAILED);
String diagnostics=attempt.getDiagnostics().toString();
LOG.info("attempt.getDiagnostics: " + diagnostics);
Assert.assertTrue(diagnostics.contains("Container launch failed for " + "container_0_0000_01_000000 : "));
Assert.assertTrue(diagnostics.contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
}
finally {
server.stop();
app.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies the container launcher thread pool's growth behavior without a
// configured limit: the pool starts empty with INITIAL_POOL_SIZE core
// threads, grows one thread per distinct host, and does not grow when
// events target hosts it has already seen.
@Test(timeout=5000) public void testPoolSize() throws InterruptedException {
ApplicationId appId=ApplicationId.newInstance(12345,67);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,3);
JobId jobId=MRBuilderUtils.newJobId(appId,8);
TaskId taskId=MRBuilderUtils.newTaskId(jobId,9,TaskType.MAP);
AppContext context=mock(AppContext.class);
CustomContainerLauncher containerLauncher=new CustomContainerLauncher(context);
containerLauncher.init(new Configuration());
containerLauncher.start();
ThreadPoolExecutor threadPool=containerLauncher.getThreadPool();
// No events yet: no threads created, core size is the initial default.
Assert.assertEquals(0,threadPool.getPoolSize());
Assert.assertEquals(ContainerLauncherImpl.INITIAL_POOL_SIZE,threadPool.getCorePoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.expectedCorePoolSize=ContainerLauncherImpl.INITIAL_POOL_SIZE;
// 10 events for 10 distinct hosts -> pool grows to 10 threads.
for (int i=0; i < 10; i++) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,i);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,10);
Assert.assertEquals(10,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// Let the first batch finish processing before sending more events.
containerLauncher.finishEventHandling=true;
int timeOut=0;
while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) {
LOG.info("Waiting for number of events processed to become " + 10 + ". It is now "+ containerLauncher.numEventsProcessed.get()+ ". Timeout is "+ timeOut);
Thread.sleep(1000);
}
Assert.assertEquals(10,containerLauncher.numEventsProcessed.get());
containerLauncher.finishEventHandling=false;
// 10 more events for the SAME hosts -> the pool must not grow past 10.
for (int i=0; i < 10; i++) {
ContainerId containerId=ContainerId.newInstance(appAttemptId,i + 10);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,i + 10);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
}
waitForEvents(containerLauncher,20);
Assert.assertEquals(10,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
// One event for a brand-new host -> the pool grows to 11 threads.
containerLauncher.expectedCorePoolSize=11 + ContainerLauncherImpl.INITIAL_POOL_SIZE;
containerLauncher.finishEventHandling=false;
ContainerId containerId=ContainerId.newInstance(appAttemptId,21);
TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,21);
containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host11:1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
waitForEvents(containerLauncher,21);
Assert.assertEquals(11,threadPool.getPoolSize());
Assert.assertNull(containerLauncher.foundErrors);
containerLauncher.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that when a map request cannot be satisfied (it needs 2048MB but
// the cluster only offers 1024MB headroom) while a reducer holds a
// container, preemptReducesIfNeeded() marks that reducer for preemption.
@Test(timeout=30000) public void testPreemptReducers() throws Exception {
LOG.info("Running testPreemptReducers");
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
// Submit the application and bring up one 2048MB NodeManager.
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob,new SystemClock());
allocator.setMapResourceRequest(1024);
allocator.setReduceResourceRequest(1024);
RMContainerAllocator.AssignedRequests assignedRequests=allocator.getAssignedRequests();
RMContainerAllocator.ScheduledRequests scheduledRequests=allocator.getScheduledRequests();
// A pending 2048MB map request plus an already-assigned reducer.
ContainerRequestEvent event1=createReq(jobId,1,2048,new String[]{"h1"},false,false);
scheduledRequests.maps.put(mock(TaskAttemptId.class),new RMContainerRequestor.ContainerRequest(event1,null));
assignedRequests.reduces.put(mock(TaskAttemptId.class),mock(Container.class));
allocator.preemptReducesIfNeeded();
Assert.assertEquals("The reducer is not preempted",1,assignedRequests.preemptionWaitingReduces.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies that the allocator recalculates the reduce schedule only when
// the number of completed maps changes between schedule() calls, not on
// every heartbeat.
@Test public void testCompletedTasksRecalculateSchedule() throws Exception {
LOG.info("Running testCompletedTasksRecalculateSchedule");
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job job=mock(Job.class);
when(job.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
doReturn(10).when(job).getTotalMaps();
doReturn(10).when(job).getTotalReduces();
doReturn(0).when(job).getCompletedMaps();
RecalculateContainerAllocator allocator=new RecalculateContainerAllocator(rm,conf,appAttemptId,job);
allocator.schedule();
// Same completed-map count as last schedule: no recalculation expected.
allocator.recalculatedReduceSchedule=false;
allocator.schedule();
Assert.assertFalse("Unexpected recalculate of reduce schedule",allocator.recalculatedReduceSchedule);
// Completed-map count changed (0 -> 1): recalculation expected.
doReturn(1).when(job).getCompletedMaps();
allocator.schedule();
Assert.assertTrue("Expected recalculate of reduce schedule",allocator.recalculatedReduceSchedule);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that the job progress computed by the AM is reported back to the
 * RM at the expected checkpoints as map and then reduce tasks finish.
 */
@Test public void testReportedAppProgress() throws Exception {
  LOG.info("Running testReportedAppProgress");
  Configuration conf = new Configuration();
  final MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher rmDispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  // Submit the application and bring up the AM node.
  RMApp rmApp = rm.submitApp(1024);
  rmDispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 21504);
  amNodeManager.nodeHeartbeat(true);
  rmDispatcher.await();
  final ApplicationAttemptId appAttemptId = rmApp.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  rmDispatcher.await();
  // 10 maps + 10 reduces; the MRApp is wired to talk to our mock RM.
  MRApp mrApp = new MRApp(appAttemptId, ContainerId.newInstance(appAttemptId, 0),
      10, 10, false, this.getClass().getName(), true, 1) {
    @Override protected Dispatcher createDispatcher() {
      return new DrainDispatcher();
    }
    protected ContainerAllocator createContainerAllocator(ClientService clientService,
        AppContext context) {
      return new MyContainerAllocator(rm, appAttemptId, context);
    }
  };
  Assert.assertEquals(0.0, rmApp.getProgress(), 0.0);
  mrApp.submit(conf);
  Job job = mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
  DrainDispatcher amDispatcher = (DrainDispatcher) mrApp.getDispatcher();
  MyContainerAllocator allocator = (MyContainerAllocator) mrApp.getContainerAllocator();
  mrApp.waitForInternalState((JobImpl) job, JobStateInternal.RUNNING);
  amDispatcher.await();
  // Wait until every map attempt is waiting for a container.
  for (Task t : job.getTasks().values()) {
    if (t.getType() == TaskType.MAP) {
      mrApp.waitForInternalState((TaskAttemptImpl) t.getAttempts().values().iterator().next(),
          TaskAttemptStateInternal.UNASSIGNED);
    }
  }
  amDispatcher.await();
  allocator.schedule();
  rmDispatcher.await();
  amNodeManager.nodeHeartbeat(true);
  rmDispatcher.await();
  allocator.schedule();
  rmDispatcher.await();
  // All maps should now be running.
  for (Task t : job.getTasks().values()) {
    if (t.getType() == TaskType.MAP) {
      mrApp.waitForState(t, TaskState.RUNNING);
    }
  }
  allocator.schedule();
  rmDispatcher.await();
  // Baseline progress once tasks are running but none finished.
  Assert.assertEquals(0.05f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.05f, rmApp.getProgress(), 0.001f);
  // Was a raw Iterator; generified to avoid unchecked usage.
  Iterator<Task> it = job.getTasks().values().iterator();
  // Finish maps in batches and check progress after each batch.
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 1);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.095f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.095f, rmApp.getProgress(), 0.001f);
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 7);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.41f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.41f, rmApp.getProgress(), 0.001f);
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
  allocator.schedule();
  rmDispatcher.await();
  amNodeManager.nodeHeartbeat(true);
  rmDispatcher.await();
  allocator.schedule();
  rmDispatcher.await();
  // With all maps done, reduces should get containers and start running.
  for (Task t : job.getTasks().values()) {
    if (t.getType() == TaskType.REDUCE) {
      mrApp.waitForState(t, TaskState.RUNNING);
    }
  }
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 2);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.59f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.59f, rmApp.getProgress(), 0.001f);
  finishNextNTasks(rmDispatcher, amNodeManager, mrApp, it, 8);
  allocator.schedule();
  rmDispatcher.await();
  Assert.assertEquals(0.95f, job.getProgress(), 0.001f);
  Assert.assertEquals(0.95f, rmApp.getProgress(), 0.001f);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests containers of two different sizes (1024 MB and 2048 MB) and
 * checks both are assigned once node heartbeats make resources available.
 */
@Test public void testResource() throws Exception {
  LOG.info("Running testResource");
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  // Three worker nodes with enough room for both requests.
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  // Two requests with different memory sizes.
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 2048, new String[]{"h2"});
  allocator.sendRequest(event2);
  // First heartbeat only sends the asks; nothing is assigned yet.
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  nodeManager1.nodeHeartbeat(true);
  nodeManager2.nodeHeartbeat(true);
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  checkAssignments(new ContainerRequestEvent[]{event1, event2}, assigned, false);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the heartbeat thread advances with a controlled clock and that
 * a callback registered via runOnNextHeartbeat fires on the next beat.
 */
@Test public void testHeartbeatHandler() throws Exception {
  LOG.info("Running testHeartbeatHandler");
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 1);
  ControlledClock clock = new ControlledClock(new SystemClock());
  AppContext appContext = mock(AppContext.class);
  when(appContext.getClock()).thenReturn(clock);
  when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1, 1));
  // Allocator with RM registration and the real heartbeat RPC stubbed out.
  RMContainerAllocator allocator = new RMContainerAllocator(mock(ClientService.class),
      appContext, new NoopAMPreemptionPolicy()) {
    @Override protected void register() {
    }
    @Override protected ApplicationMasterProtocol createSchedulerProxy() {
      return mock(ApplicationMasterProtocol.class);
    }
    @Override protected synchronized void heartbeat() throws Exception {
    }
  };
  allocator.init(conf);
  allocator.start();
  clock.setTime(5);
  waitForHeartbeatTime(allocator, 5);
  Assert.assertEquals(5, allocator.getLastHeartbeatTime());
  clock.setTime(7);
  waitForHeartbeatTime(allocator, 7);
  Assert.assertEquals(7, allocator.getLastHeartbeatTime());
  // Register a callback and make sure it runs on the next heartbeat.
  final AtomicBoolean callbackCalled = new AtomicBoolean(false);
  allocator.runOnNextHeartbeat(new Runnable() {
    @Override public void run() {
      callbackCalled.set(true);
    }
  });
  clock.setTime(8);
  waitForHeartbeatTime(allocator, 8);
  Assert.assertEquals(8, allocator.getLastHeartbeatTime());
  Assert.assertTrue(callbackCalled.get());
}

/**
 * Polls (up to 5 seconds) until the allocator's last heartbeat time reaches
 * the expected clock value. Extracted from three identical copy-pasted
 * wait loops in testHeartbeatHandler.
 */
private void waitForHeartbeatTime(RMContainerAllocator allocator, long expectedTime)
    throws InterruptedException {
  int timeToWaitMs = 5000;
  while (allocator.getLastHeartbeatTime() != expectedTime && timeToWaitMs > 0) {
    Thread.sleep(10);
    timeToWaitMs -= 10;
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that nodes with failed containers are blacklisted and that
 * subsequent requests are satisfied only by the remaining healthy node.
 */
@Test public void testBlackListedNodes() throws Exception {
  LOG.info("Running testBlackListedNodes");
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // -1 disables the "ignore blacklisting" escape hatch entirely.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[]{"h2"});
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h3"});
  allocator.sendRequest(event3);
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Fail one container on h1 and one on h2 — both nodes get blacklisted
  // (MAX_TASK_FAILURES_PER_TRACKER is 1).
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
  allocator.sendFailure(f1);
  ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
  allocator.sendFailure(f2);
  nodeManager1.nodeHeartbeat(true);
  nodeManager2.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Both blacklist additions should have been sent to the RM.
  assertBlacklistAdditionsAndRemovals(2, 0, rm);
  nodeManager1.nodeHeartbeat(false);
  nodeManager2.nodeHeartbeat(false);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Only h3 is healthy; all three requests must land there.
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  // assertEquals instead of assertTrue(size()==3) so a failure reports the
  // actual assignment count.
  Assert.assertEquals("No of assignments must be 3", 3, assigned.size());
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    Assert.assertEquals("Assigned container host not correct", "h3",
        assig.getContainer().getNodeId().getHost());
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies map placement honors node locality: two requests for h1 go to
 * h1, while the h2 request (h2 never heartbeats) lands on h3 instead.
 */
@Test public void testMapNodeLocality() throws Exception {
  LOG.info("Running testMapNodeLocality");
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  // h1 fits two 1024 MB containers, h3 only one; h2 is registered but
  // never heartbeats, so nothing can be scheduled there.
  MockNM nodeManager1 = rm.registerNode("h1:1234", 3072);
  rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 1536);
  dispatcher.await();
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[]{"h1"});
  allocator.sendRequest(event2);
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h2"});
  allocator.sendRequest(event3);
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  nodeManager3.nodeHeartbeat(true);
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  checkAssignments(new ContainerRequestEvent[]{event1, event2, event3}, assigned, false);
  // Remove event3's assignment via an explicit iterator. The original used
  // collection.remove() inside an enhanced-for, which is only safe because
  // of the immediate break; Iterator.remove makes it unconditionally safe.
  Iterator<TaskAttemptContainerAssignedEvent> iter = assigned.iterator();
  while (iter.hasNext()) {
    TaskAttemptContainerAssignedEvent event = iter.next();
    if (event.getTaskAttemptID().equals(event3.getAttemptID())) {
      iter.remove();
      // The h2 request must have fallen back to h3.
      Assert.assertEquals("h3", event.getContainer().getNodeId().getHost());
      break;
    }
  }
  // The remaining two assignments must be node-local on h1.
  checkAssignments(new ContainerRequestEvent[]{event1, event2}, assigned, true);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies the ignore-blacklisting behavior: once more than 33% of the
 * cluster's nodes are blacklisted, blacklisting is ignored (blacklisted
 * hosts get containers again), and re-enabled as the cluster grows.
 */
@Test public void testIgnoreBlacklisting() throws Exception {
  LOG.info("Running testIgnoreBlacklisting");
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // Ignore blacklisting once >33% of nodes are blacklisted.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, 33);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM[] nodeManagers = new MockNM[10];
  int nmNum = 0;
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = null;
  nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
  nodeManagers[0].nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  assigned = getContainerOnHost(jobId, 1, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  LOG.info("Failing container _1 on H1 (Node should be blacklisted and" + " ignore blacklisting enabled");
  // 1 of 1 node blacklisted (100% > 33%) -> blacklisting is ignored, so h1
  // still gets containers after the blacklist addition is reported.
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
  allocator.sendFailure(f1);
  assigned = getContainerOnHost(jobId, 2, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 1, 0, 0, 1, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  assigned = getContainerOnHost(jobId, 2, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  // Growing the cluster to 2 and 3 nodes keeps the ratio above threshold.
  nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
  assigned = getContainerOnHost(jobId, 3, 1024, new String[]{"h2"}, nodeManagers[1], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
  assigned = getContainerOnHost(jobId, 4, 1024, new String[]{"h3"}, nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  assigned = getContainerOnHost(jobId, 5, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  // At 4 nodes the blacklisted ratio drops to 25% (< 33%): blacklisting is
  // re-enabled (1 addition reported) and h1 stops receiving containers.
  nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
  assigned = getContainerOnHost(jobId, 6, 1024, new String[]{"h4"}, nodeManagers[3], dispatcher, allocator, 0, 0, 1, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  assigned = getContainerOnHost(jobId, 7, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Failing h2 pushes the ratio back above threshold (2/4 = 50%): ignore
  // blacklisting again and both pending h1 requests get satisfied.
  ContainerFailedEvent f2 = createFailEvent(jobId, 3, "h2", false);
  allocator.sendFailure(f2);
  assigned = getContainerOnHost(jobId, 8, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 1, 0, 0, 2, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  assigned = getContainerOnHost(jobId, 8, 1024, new String[]{"h1"}, nodeManagers[0], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
  assigned = getContainerOnHost(jobId, 9, 1024, new String[]{"h2"}, nodeManagers[1], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  // A failure on h3 while blacklisting is ignored still lets h3 allocate.
  ContainerFailedEvent f3 = createFailEvent(jobId, 4, "h3", false);
  allocator.sendFailure(f3);
  nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
  assigned = getContainerOnHost(jobId, 10, 1024, new String[]{"h3"}, nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  // Register five more nodes; at 10 nodes the ratio (3/10 = 30%) drops
  // below threshold, so the 3 pending blacklist additions are reported.
  for (int i = 0; i < 5; i++) {
    nodeManagers[nmNum] = registerNodeManager(nmNum++, rm, dispatcher);
    assigned = getContainerOnHost(jobId, 11 + i, 1024, new String[]{String.valueOf(5 + i)}, nodeManagers[4 + i], dispatcher, allocator, 0, 0, (i == 4 ? 3 : 0), 0, rm);
    Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  }
  // Blacklisting is active again: h3 gets nothing.
  assigned = getContainerOnHost(jobId, 20, 1024, new String[]{"h3"}, nodeManagers[2], dispatcher, allocator, 0, 0, 0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies a reducer is NOT preempted before the configured preemption
 * delay has elapsed, and IS preempted once it has.
 */
@Test(timeout=30000) public void testNonAggressivelyPreemptReducers() throws Exception {
  // Fixed: previously logged the wrong test name ("testPreemptReducers").
  LOG.info("Running testNonAggressivelyPreemptReducers");
  final int preemptThreshold = 2;
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC, preemptThreshold);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  // Controlled clock so the test can step past the preemption delay.
  ControlledClock clock = new ControlledClock(null);
  clock.setTime(1);
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob, clock);
  allocator.setMapResourceRequest(1024);
  allocator.setReduceResourceRequest(1024);
  RMContainerAllocator.AssignedRequests assignedRequests = allocator.getAssignedRequests();
  RMContainerAllocator.ScheduledRequests scheduledRequests = allocator.getScheduledRequests();
  // One pending map that cannot fit while the reducer holds resources.
  ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[]{"h1"}, false, false);
  scheduledRequests.maps.put(mock(TaskAttemptId.class), new RMContainerRequestor.ContainerRequest(event1, null, clock.getTime()));
  assignedRequests.reduces.put(mock(TaskAttemptId.class), mock(Container.class));
  // Within the delay window: no preemption yet.
  clock.setTime(clock.getTime() + 1);
  allocator.preemptReducesIfNeeded();
  // Message typo fixed ("preeempted" -> "preempted").
  Assert.assertEquals("The reducer is aggressively preempted", 0, assignedRequests.preemptionWaitingReduces.size());
  // Past the delay: the reducer must be preempted.
  clock.setTime(clock.getTime() + (preemptThreshold) * 1000);
  allocator.preemptReducesIfNeeded();
  Assert.assertEquals("The reducer is not preempted", 1, assignedRequests.preemptionWaitingReduces.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Verifies that when a reducer is killed to ramp down for a failed map,
 * the kill event carries the ramp-down diagnostic message.
 */
@Test(timeout=30000) public void testReducerRampdownDiagnostics() throws Exception {
  // Fixed log-message typo ("tesReducer..." -> "testReducer...").
  LOG.info("Running testReducerRampdownDiagnostics");
  final Configuration conf = new Configuration();
  // Start reduces immediately, before any map completes.
  conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
  final MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  final DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  final RMApp app = rm.submitApp(1024);
  dispatcher.await();
  final String host = "host1";
  final MockNM nm = rm.registerNode(String.format("%s:1234", host), 2048);
  nm.nodeHeartbeat(true);
  dispatcher.await();
  final ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  final JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  final Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  final MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  dispatcher.await();
  final String[] locations = new String[]{host};
  // Request a reduce and loop until it is actually assigned.
  allocator.sendRequest(createReq(jobId, 0, 1024, locations, false, true));
  for (int i = 0; i < 1; ) {
    dispatcher.await();
    i += allocator.schedule().size();
    nm.nodeHeartbeat(true);
  }
  // A failed map forces the allocator to ramp down the running reducer.
  allocator.sendRequest(createReq(jobId, 0, 1024, locations, true, false));
  while (allocator.getTaskAttemptKillEvents().size() == 0) {
    dispatcher.await();
    allocator.schedule().size();
    nm.nodeHeartbeat(true);
  }
  final String killEventMessage = allocator.getTaskAttemptKillEvents().get(0).getMessage();
  Assert.assertTrue("No reducer rampDown preemption message", killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the allocator surfaces node health updates: JobUpdatedNodeEvents
 * on node state changes and a TA_KILL event for the attempt running on a
 * node that turns unhealthy.
 */
@Test public void testUpdatedNodes() throws Exception {
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  // NOTE(review): unlike the sibling tests, mockJob.getReport() is not
  // stubbed here — presumably the updated-nodes path never reads the
  // report; confirm before relying on it.
  Job mockJob = mock(Job.class);
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nm1 = rm.registerNode("h1:1234", 10240);
  MockNM nm2 = rm.registerNode("h2:1234", 10240);
  dispatcher.await();
  ContainerRequestEvent event = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event);
  // Wire the mock job so the attempt resolves to a task running on nm1.
  TaskAttemptId attemptId = event.getAttemptID();
  TaskAttempt mockTaskAttempt = mock(TaskAttempt.class);
  when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId());
  Task mockTask = mock(Task.class);
  when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt);
  when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  nm1.nodeHeartbeat(true);
  dispatcher.await();
  // All three registered nodes are reported as updated.
  Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
  Assert.assertEquals(3, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
  allocator.getJobUpdatedNodeEvents().clear();
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(1, assigned.size());
  Assert.assertEquals(nm1.getNodeId(), assigned.get(0).getContainer().getNodeId());
  Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
  Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
  // Both nodes turn unhealthy: expect an update event and a kill for the
  // attempt that was running on nm1.
  nm1.nodeHeartbeat(false);
  nm2.nodeHeartbeat(false);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, assigned.size());
  Assert.assertEquals(1, allocator.getJobUpdatedNodeEvents().size());
  Assert.assertEquals(1, allocator.getTaskAttemptKillEvents().size());
  Assert.assertEquals(2, allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size());
  Assert.assertEquals(attemptId, allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID());
  allocator.getJobUpdatedNodeEvents().clear();
  allocator.getTaskAttemptKillEvents().clear();
  // No further changes: no new events.
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals(0, assigned.size());
  Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty());
  Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testUnregistrationOnlyIfRegistered() throws Exception {
  Configuration conf = new Configuration();
  final MyResourceManager resourceManager = new MyResourceManager(conf);
  resourceManager.start();
  DrainDispatcher dispatcher =
      (DrainDispatcher) resourceManager.getRMContext().getDispatcher();
  // Submit an application and bring its AM node online.
  RMApp application = resourceManager.submitApp(1024);
  dispatcher.await();
  MockNM nodeManager = resourceManager.registerNode("127.0.0.1:1234", 11264);
  nodeManager.nodeHeartbeat(true);
  dispatcher.await();
  final ApplicationAttemptId attemptId =
      application.getCurrentAppAttempt().getAppAttemptId();
  resourceManager.sendAMLaunched(attemptId);
  dispatcher.await();
  // A map-only MRApp whose allocator talks to our mock RM.
  MRApp mapReduceApp = new MRApp(attemptId, ContainerId.newInstance(attemptId, 0),
      10, 0, false, this.getClass().getName(), true, 1) {
    @Override protected Dispatcher createDispatcher() {
      return new DrainDispatcher();
    }
    protected ContainerAllocator createContainerAllocator(ClientService clientService,
        AppContext context) {
      return new MyContainerAllocator(resourceManager, attemptId, context);
    }
  };
  mapReduceApp.submit(conf);
  DrainDispatcher appDispatcher = (DrainDispatcher) mapReduceApp.getDispatcher();
  MyContainerAllocator containerAllocator =
      (MyContainerAllocator) mapReduceApp.getContainerAllocator();
  appDispatcher.await();
  // The AM registered, so stopping the app must also unregister it.
  Assert.assertTrue(containerAllocator.isApplicationMasterRegistered());
  mapReduceApp.stop();
  Assert.assertTrue(containerAllocator.isUnregistered());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that a request already scheduled toward a node that later gets
 * blacklisted is rescheduled to a healthy node (h3).
 */
@Test public void testBlackListedNodesWithSchedulingToThatNode() throws Exception {
  LOG.info("Running testBlackListedNodesWithSchedulingToThatNode");
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
  conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
  // -1 disables the "ignore blacklisting" escape hatch entirely.
  conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  LOG.info("Requesting 1 Containers _1 on H1");
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  LOG.info("RM Heartbeat (to send the container requests)");
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  LOG.info("h1 Heartbeat (To actually schedule the containers)");
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  LOG.info("RM Heartbeat (To process the scheduled containers)");
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 1", 1, assigned.size());
  LOG.info("Failing container _1 on H1 (should blacklist the node)");
  ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
  allocator.sendFailure(f1);
  // Re-request the failed attempt; the new ask must avoid blacklisted h1.
  ContainerRequestEvent event1f = createReq(jobId, 1, 1024, new String[]{"h1"}, true, false);
  allocator.sendRequest(event1f);
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(1, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // A request naming both hosts can only be satisfied by h3.
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h1", "h3"});
  allocator.sendRequest(event3);
  LOG.info("h1 Heartbeat (To actually schedule the containers)");
  nodeManager1.nodeHeartbeat(true);
  dispatcher.await();
  LOG.info("RM Heartbeat (To process the scheduled containers)");
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  LOG.info("RM Heartbeat (To process the re-scheduled containers)");
  assigned = allocator.schedule();
  dispatcher.await();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  LOG.info("h3 Heartbeat (To re-schedule the containers)");
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)");
  assigned = allocator.schedule();
  assertBlacklistAdditionsAndRemovals(0, 0, rm);
  dispatcher.await();
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    // Fixed log-message typo ("assgined" -> "assigned").
    LOG.info(assig.getTaskAttemptID() + " assigned to " + assig.getContainer().getId() + " with priority " + assig.getContainer().getPriority());
  }
  Assert.assertEquals("No of assignments must be 2", 2, assigned.size());
  // Both assignments must have landed on the healthy node h3.
  for (TaskAttemptContainerAssignedEvent assig : assigned) {
    Assert.assertEquals("Assigned container " + assig.getContainer().getId() + " host not correct", "h3", assig.getContainer().getNodeId().getHost());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Basic allocation round-trip: requests are sent as asks on heartbeat,
 * containers are assigned after node heartbeats, and the ask list shrinks
 * to empty once everything is satisfied.
 */
@Test public void testSimple() throws Exception {
  LOG.info("Running testSimple");
  Configuration conf = new Configuration();
  MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext().getDispatcher();
  RMApp app = rm.submitApp(1024);
  dispatcher.await();
  MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
  amNodeManager.nodeHeartbeat(true);
  dispatcher.await();
  ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
  Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
  MyContainerAllocator allocator = new MyContainerAllocator(rm, conf, appAttemptId, mockJob);
  MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
  MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
  MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
  dispatcher.await();
  ContainerRequestEvent event1 = createReq(jobId, 1, 1024, new String[]{"h1"});
  allocator.sendRequest(event1);
  ContainerRequestEvent event2 = createReq(jobId, 2, 1024, new String[]{"h2"});
  allocator.sendRequest(event2);
  // Was a raw List; generified for type safety.
  List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Two node-local requests -> 4 asks (host-level + off-switch entries).
  Assert.assertEquals(4, rm.getMyFifoScheduler().lastAsk.size());
  ContainerRequestEvent event3 = createReq(jobId, 3, 1024, new String[]{"h3"});
  allocator.sendRequest(event3);
  assigned = allocator.schedule();
  dispatcher.await();
  Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
  // Only the deltas for the new request are sent on this heartbeat.
  Assert.assertEquals(3, rm.getMyFifoScheduler().lastAsk.size());
  nodeManager1.nodeHeartbeat(true);
  nodeManager2.nodeHeartbeat(true);
  nodeManager3.nodeHeartbeat(true);
  dispatcher.await();
  assigned = allocator.schedule();
  dispatcher.await();
  // Nothing new to ask for while assignments are being collected.
  Assert.assertEquals(0, rm.getMyFifoScheduler().lastAsk.size());
  checkAssignments(new ContainerRequestEvent[]{event1, event2, event3}, assigned, false);
  assigned = allocator.schedule();
  dispatcher.await();
  // After all assignments, the allocator decrements the satisfied requests.
  Assert.assertEquals(5, rm.getMyFifoScheduler().lastAsk.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testCompletedContainerEvent() {
  RMContainerAllocator containerAllocator = new RMContainerAllocator(
      mock(ClientService.class), mock(AppContext.class), new NoopAMPreemptionPolicy());
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(
      MRBuilderUtils.newTaskId(MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId firstContainer = ContainerId.newInstance(appAttemptId, 1);
  // A normally-finished container maps to TA_CONTAINER_COMPLETED.
  ContainerStatus normalStatus =
      ContainerStatus.newInstance(firstContainer, ContainerState.RUNNING, "", 0);
  TaskAttemptEvent normalEvent =
      containerAllocator.createContainerFinishedEvent(normalStatus, taskAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, normalEvent.getType());
  // An ABORTED exit status maps to TA_KILL.
  ContainerStatus abortedStatus = ContainerStatus.newInstance(
      firstContainer, ContainerState.RUNNING, "", ContainerExitStatus.ABORTED);
  TaskAttemptEvent abortedEvent =
      containerAllocator.createContainerFinishedEvent(abortedStatus, taskAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());
  // Repeat for a second container: normal completion vs PREEMPTED.
  ContainerId secondContainer = ContainerId.newInstance(appAttemptId, 2);
  ContainerStatus secondNormalStatus =
      ContainerStatus.newInstance(secondContainer, ContainerState.RUNNING, "", 0);
  TaskAttemptEvent secondNormalEvent =
      containerAllocator.createContainerFinishedEvent(secondNormalStatus, taskAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_CONTAINER_COMPLETED, secondNormalEvent.getType());
  // A PREEMPTED exit status also maps to TA_KILL.
  ContainerStatus preemptedStatus = ContainerStatus.newInstance(
      secondContainer, ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);
  TaskAttemptEvent preemptedEvent =
      containerAllocator.createContainerFinishedEvent(preemptedStatus, taskAttemptId);
  Assert.assertEquals(TaskAttemptEventType.TA_KILL, preemptedEvent.getType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Drives a 10-map/0-reduce job and checks that the progress the AM reports
// to the RM tracks map completion. The expected values (0.05 -> 0.14 ->
// 0.59 -> 0.95) are consistent with a fixed 0.05 AM-startup share plus a
// 0.9 share spread across the maps -- inferred from the asserts; confirm
// against the AM progress computation.
@Test public void testReportedAppProgressWithOnlyMaps() throws Exception {
LOG.info("Running testReportedAppProgressWithOnlyMaps");
Configuration conf=new Configuration();
final MyResourceManager rm=new MyResourceManager(conf);
rm.start();
// Drain dispatchers let the test block until every queued event is handled.
DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp rmApp=rm.submitApp(1024);
rmDispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",11264);
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
rmDispatcher.await();
// MRApp wired to this test's RM: 10 maps, 0 reduces, using the test's
// MyContainerAllocator so scheduling can be stepped manually.
MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,0,false,this.getClass().getName(),true,1){
@Override protected Dispatcher createDispatcher(){
return new DrainDispatcher();
}
protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){
return new MyContainerAllocator(rm,appAttemptId,context);
}
}
;
// No progress is reported before the job is submitted.
Assert.assertEquals(0.0,rmApp.getProgress(),0.0);
mrApp.submit(conf);
Job job=mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue();
DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher();
MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator();
mrApp.waitForInternalState((JobImpl)job,JobStateInternal.RUNNING);
amDispatcher.await();
// Every attempt must be waiting for a container before scheduling starts.
for ( Task t : job.getTasks().values()) {
mrApp.waitForInternalState((TaskAttemptImpl)t.getAttempts().values().iterator().next(),TaskAttemptStateInternal.UNASSIGNED);
}
amDispatcher.await();
// First schedule() transmits the asks; the heartbeat frees resources and
// the second schedule() collects the assignments.
allocator.schedule();
rmDispatcher.await();
amNodeManager.nodeHeartbeat(true);
rmDispatcher.await();
allocator.schedule();
rmDispatcher.await();
for ( Task t : job.getTasks().values()) {
mrApp.waitForState(t,TaskState.RUNNING);
}
// schedule() also heartbeats the AM's current progress to the RM.
allocator.schedule();
rmDispatcher.await();
// Nothing finished yet: only the 0.05 startup share is reported.
Assert.assertEquals(0.05f,job.getProgress(),0.001f);
Assert.assertEquals(0.05f,rmApp.getProgress(),0.001f);
Iterator it=job.getTasks().values().iterator();
// Finish 1 of 10 maps: progress moves to 0.14.
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,1);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.14f,job.getProgress(),0.001f);
Assert.assertEquals(0.14f,rmApp.getProgress(),0.001f);
// Finish 5 more (6/10 done): 0.59.
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,5);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.59f,job.getProgress(),0.001f);
Assert.assertEquals(0.59f,rmApp.getProgress(),0.001f);
// Finish the last 4 (all 10 done): 0.95.
finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,4);
allocator.schedule();
rmDispatcher.await();
Assert.assertEquals(0.95f,job.getProgress(),0.001f);
Assert.assertEquals(0.95f,rmApp.getProgress(),0.001f);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// End-to-end check that the MR AM allocator survives an RM restart: the new
// RM answers the stale AM with a resync command, and the allocator must
// re-register and re-send every outstanding container ask, release, and
// blacklist addition to the new RM.
@Test public void testRMContainerAllocatorResendsRequestsOnRMRestart() throws Exception {
Configuration conf=new Configuration();
conf.set(YarnConfiguration.RECOVERY_ENABLED,"true");
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,true);
conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true);
conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1);
// -1 disables the ignore-blacklisting threshold so blacklisting stays
// in force for the whole test.
conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,-1);
// A shared in-memory store lets rm2 recover what rm1 persisted.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
// ---- Phase 1: first RM instance ----
MyResourceManager rm1=new MyResourceManager(conf,memStore);
rm1.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher();
RMApp app=rm1.submitApp(1024);
dispatcher.await();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm1,conf,appAttemptId,mockJob);
// Two container requests plus a failure on h2 (adds h2 to the blacklist,
// verified below by assertBlacklistAdditionsAndRemovals(1,0,...)).
ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"});
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,2048,new String[]{"h1","h2"});
allocator.sendRequest(event2);
ContainerFailedEvent f1=createFailEvent(jobId,1,"h2",false);
allocator.sendFailure(f1);
List assignedContainers=allocator.schedule();
dispatcher.await();
// Nothing can be assigned before a node heartbeat advertises resources.
Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size());
assertAsksAndReleases(3,0,rm1);
assertBlacklistAdditionsAndRemovals(1,0,rm1);
nm1.nodeHeartbeat(true);
dispatcher.await();
assignedContainers=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 2",2,assignedContainers.size());
assertAsksAndReleases(0,0,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
assignedContainers=allocator.schedule();
Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size());
assertAsksAndReleases(3,0,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
// One more request plus a deallocation while rm1 is still running.
ContainerRequestEvent event3=createReq(jobId,3,1000,new String[]{"h1"});
allocator.sendRequest(event3);
ContainerAllocatorEvent deallocate1=createDeallocateEvent(jobId,1,false);
allocator.sendDeallocate(deallocate1);
assignedContainers=allocator.schedule();
Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size());
assertAsksAndReleases(3,1,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
// ---- Phase 2: "restart" -- rm2 recovers from the same memStore ----
MyResourceManager rm2=new MyResourceManager(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
allocator.updateSchedulerProxy(rm2);
dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher();
// The old NM registration is stale: the new RM orders a resync.
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
// Events raised around the restart must survive the resync and be resent.
ContainerAllocatorEvent deallocate2=createDeallocateEvent(jobId,2,false);
allocator.sendDeallocate(deallocate2);
ContainerFailedEvent f2=createFailEvent(jobId,1,"h3",false);
allocator.sendFailure(f2);
ContainerRequestEvent event4=createReq(jobId,4,2000,new String[]{"h1","h2"});
allocator.sendRequest(event4);
allocator.schedule();
dispatcher.await();
// The first allocate against rm2 comes back as a resync command.
Assert.assertTrue("Last allocate response is not RESYNC",allocator.isResyncCommand());
ContainerRequestEvent event5=createReq(jobId,5,3000,new String[]{"h1","h2","h3"});
allocator.sendRequest(event5);
assignedContainers=allocator.schedule();
dispatcher.await();
// After re-registering, everything outstanding is resent: 3 asks,
// 2 releases, and both blacklist additions (h2 and h3).
assertAsksAndReleases(3,2,rm2);
assertBlacklistAdditionsAndRemovals(2,0,rm2);
nm1.nodeHeartbeat(true);
dispatcher.await();
assignedContainers=allocator.schedule();
dispatcher.await();
// All three assignments must land on h1 -- h2 and h3 are blacklisted.
Assert.assertEquals("Number of container should be 3",3,assignedContainers.size());
for ( TaskAttemptContainerAssignedEvent assig : assignedContainers) {
Assert.assertTrue("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost()));
}
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Scheduling across three NMs of different sizes: only the satisfiable
// requests (event1, event3) get containers, and none of them may land on
// the undersized node h1.
@Test public void testMapReduceScheduling() throws Exception {
LOG.info("Running testMapReduceScheduling");
Configuration conf=new Configuration();
MyResourceManager rm=new MyResourceManager(conf);
rm.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher();
RMApp app=rm.submitApp(1024);
dispatcher.await();
MockNM amNodeManager=rm.registerNode("amNM:1234",2048);
amNodeManager.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm.sendAMLaunched(appAttemptId);
dispatcher.await();
JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0);
Job mockJob=mock(Job.class);
when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,""));
MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
// h1 is deliberately small (1024MB) so the 2048MB requests cannot fit there.
MockNM nodeManager1=rm.registerNode("h1:1234",1024);
MockNM nodeManager2=rm.registerNode("h2:1234",10240);
MockNM nodeManager3=rm.registerNode("h3:1234",10240);
dispatcher.await();
// NOTE(review): the two trailing booleans of createReq presumably flag
// earlier-failed-attempt / reduce-task -- confirm against the helper.
ContainerRequestEvent event1=createReq(jobId,1,2048,new String[]{"h1","h2"},true,false);
allocator.sendRequest(event1);
ContainerRequestEvent event2=createReq(jobId,2,3000,new String[]{"h1"},false,true);
allocator.sendRequest(event2);
ContainerRequestEvent event3=createReq(jobId,3,2048,new String[]{"h3"},false,false);
allocator.sendRequest(event3);
// The first schedule() only transmits the asks; nothing is assigned yet.
List assigned=allocator.schedule();
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,assigned.size());
// Node heartbeats make the cluster resources visible to the scheduler.
nodeManager1.nodeHeartbeat(true);
nodeManager2.nodeHeartbeat(true);
nodeManager3.nodeHeartbeat(true);
dispatcher.await();
assigned=allocator.schedule();
dispatcher.await();
// Only event1 and event3 are expected to be satisfied.
checkAssignments(new ContainerRequestEvent[]{event1,event3},assigned,false);
for ( TaskAttemptContainerAssignedEvent assig : assigned) {
Assert.assertFalse("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost()));
}
}
InternalCallVerifier EqualityVerifier
@Test public void testSingleEntryDataStatistics() throws Exception {
  // One sample: the mean is the sample itself and there is no spread, so
  // variance, stddev, and hence the outlier bound all collapse onto it.
  DataStatistics single=new DataStatistics(17.29);
  Assert.assertEquals(1,single.count(),TOL);
  Assert.assertEquals(17.29,single.mean(),TOL);
  Assert.assertEquals(0,single.var(),TOL);
  Assert.assertEquals(0,single.std(),TOL);
  Assert.assertEquals(17.29,single.outlier(1.0f),TOL);
}
InternalCallVerifier EqualityVerifier
@Test public void testEmptyDataStatistics() throws Exception {
  // With no samples at all, every statistic must report zero.
  DataStatistics empty=new DataStatistics();
  Assert.assertEquals(0,empty.count(),TOL);
  Assert.assertEquals(0,empty.mean(),TOL);
  Assert.assertEquals(0,empty.var(),TOL);
  Assert.assertEquals(0,empty.std(),TOL);
  Assert.assertEquals(0,empty.outlier(1.0f),TOL);
}
InternalCallVerifier EqualityVerifier
@Test public void testUpdateStatistics() throws Exception {
  // Start with samples {17, 29}, then replace the 17 with a 29 via
  // updateStatistics(old, new): count stays 2 while mean/variance shift.
  DataStatistics stats=new DataStatistics(17);
  stats.add(29);
  Assert.assertEquals(2,stats.count(),TOL);
  Assert.assertEquals(23.0,stats.mean(),TOL);
  Assert.assertEquals(36.0,stats.var(),TOL);
  stats.updateStatistics(17,29);
  Assert.assertEquals(2,stats.count(),TOL);
  Assert.assertEquals(29.0,stats.mean(),TOL);
  Assert.assertEquals(0.0,stats.var(),TOL);
}
InternalCallVerifier EqualityVerifier
@Test public void testMutiEntryDataStatistics() throws Exception {
  // Two samples added one by one: mean 23, variance 36, stddev 6, and a
  // 1-sigma outlier bound of mean + std = 29.
  // (The method name keeps its historical "Muti" typo: renaming would
  // break anything that selects this test by name.)
  DataStatistics stats=new DataStatistics();
  stats.add(17);
  stats.add(29);
  Assert.assertEquals(2,stats.count(),TOL);
  Assert.assertEquals(23.0,stats.mean(),TOL);
  Assert.assertEquals(36.0,stats.var(),TOL);
  Assert.assertEquals(6.0,stats.std(),TOL);
  Assert.assertEquals(29.0,stats.outlier(1.0f),TOL);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// For both HTTP and HTTPS policies, a request to the AM web app must be
// redirected (302) to the configured proxy address plus the proxied app path.
@Test public void testMRWebAppRedirection() throws Exception {
String[] schemePrefix={WebAppUtils.HTTP_PREFIX,WebAppUtils.HTTPS_PREFIX};
for ( String scheme : schemePrefix) {
// Use the real MRClientService so an actual web app is started.
MRApp app=new MRApp(2,2,true,this.getClass().getName(),true){
@Override protected ClientService createClientService( AppContext context){
return new MRClientService(context);
}
}
;
Configuration conf=new Configuration();
conf.set(YarnConfiguration.PROXY_ADDRESS,"9.9.9.9");
// Match the cluster HTTP policy to the scheme under test.
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,scheme.equals(WebAppUtils.HTTPS_PREFIX) ? Policy.HTTPS_ONLY.name() : Policy.HTTP_ONLY.name());
webProxyBase="/proxy/" + app.getAppID();
conf.set("hadoop.http.filter.initializers",TestAMFilterInitializer.class.getName());
Job job=app.submit(conf);
String hostPort=NetUtils.getHostPortString(((MRClientService)app.getClientService()).getWebApp().getListenerAddress());
URL httpUrl=new URL("http://" + hostPort + "/mapreduce");
// Don't follow the redirect automatically -- we want to inspect it.
HttpURLConnection conn=(HttpURLConnection)httpUrl.openConnection();
conn.setInstanceFollowRedirects(false);
conn.connect();
// Expect 302 with Location = <scheme><proxy-address><proxied app path>.
String expectedURL=scheme + conf.get(YarnConfiguration.PROXY_ADDRESS) + ProxyUriUtils.getPath(app.getAppID(),"/mapreduce");
Assert.assertEquals(expectedURL,conn.getHeaderField(HttpHeaders.LOCATION));
Assert.assertEquals(HttpStatus.SC_MOVED_TEMPORARILY,conn.getResponseCode());
app.waitForState(job,JobState.SUCCEEDED);
app.verifyCompleted();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAppControllerIndex(){
  // Rendering the index page must stash the application id into the
  // controller's request context under APP_ID.
  AppContext appCtx=new MockAppContext(0,1,1,1);
  Injector mockInjector=WebAppTests.createMockInjector(AppContext.class,appCtx);
  AppController ctrl=mockInjector.getInstance(AppController.class);
  ctrl.index();
  assertEquals(appCtx.getApplicationID().toString(),ctrl.get(APP_ID,""));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidAccept() throws JSONException, Exception {
  // Requesting text/plain (unsupported) must fail with a 500 and an
  // empty entity body.
  WebResource webResource=resource();
  String body="";
  try {
    body=webResource.path("ws").path("v1").path("mapreduce").accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException expected) {
    ClientResponse errorResponse=expected.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testAMDefault() throws JSONException, Exception {
  // With no Accept header, the AM root resource defaults to JSON.
  ClientResponse resp=resource().path("ws").path("v1").path("mapreduce/").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyAMInfo(entity.getJSONObject("info"),appContext);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidUri2() throws JSONException, Exception {
  // An unknown top-level path must 404 with an empty entity body.
  WebResource webResource=resource();
  String body="";
  try {
    body=webResource.path("ws").path("v1").path("invalid").accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException expected) {
    ClientResponse errorResponse=expected.getResponse();
    assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testBlacklistedNodesXML() throws Exception {
  // Fetch the blacklisted-nodes resource as XML and verify its contents.
  WebResource target=resource().path("ws").path("v1").path("mapreduce").path("blacklistednodes");
  ClientResponse resp=target.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String entity=resp.getEntity(String.class);
  verifyBlacklistedNodesInfoXML(entity,appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testInfoSlash() throws JSONException, Exception {
  // The info resource with a trailing slash must behave like the bare path.
  WebResource target=resource().path("ws").path("v1").path("mapreduce").path("info/");
  ClientResponse resp=target.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyAMInfo(entity.getJSONObject("info"),appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testInfoDefault() throws JSONException, Exception {
  // With no Accept header, the info resource defaults to JSON.
  ClientResponse resp=resource().path("ws").path("v1").path("mapreduce").path("info/").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyAMInfo(entity.getJSONObject("info"),appContext);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidUri() throws JSONException, Exception {
  // A bogus sub-path under /mapreduce must 404 with an empty entity body.
  WebResource webResource=resource();
  String body="";
  try {
    body=webResource.path("ws").path("v1").path("mapreduce").path("bogus").accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException expected) {
    ClientResponse errorResponse=expected.getResponse();
    assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testAMXML() throws JSONException, Exception {
  // The AM root resource must also be available as XML.
  WebResource target=resource().path("ws").path("v1").path("mapreduce");
  ClientResponse resp=target.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String entity=resp.getEntity(String.class);
  verifyAMInfoXML(entity,appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testInfoXML() throws JSONException, Exception {
  // The info resource must also be available as XML.
  WebResource target=resource().path("ws").path("v1").path("mapreduce").path("info/");
  ClientResponse resp=target.accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,resp.getType());
  String entity=resp.getEntity(String.class);
  verifyAMInfoXML(entity,appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testBlacklistedNodes() throws JSONException, Exception {
  // Fetch the blacklisted-nodes resource as JSON and verify its contents.
  WebResource target=resource().path("ws").path("v1").path("mapreduce").path("blacklistednodes");
  ClientResponse resp=target.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyBlacklistedNodesInfo(entity,appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testAMSlash() throws JSONException, Exception {
  // The AM root with a trailing slash must behave like the bare path.
  ClientResponse resp=resource().path("ws").path("v1").path("mapreduce/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyAMInfo(entity.getJSONObject("info"),appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testInfo() throws JSONException, Exception {
  // Fetch the info resource as JSON and verify the AM info payload.
  WebResource target=resource().path("ws").path("v1").path("mapreduce").path("info");
  ClientResponse resp=target.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyAMInfo(entity.getJSONObject("info"),appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testAM() throws JSONException, Exception {
  // Fetch the AM root resource as JSON and verify the AM info payload.
  ClientResponse resp=resource().path("ws").path("v1").path("mapreduce").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,resp.getType());
  JSONObject entity=resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  verifyAMInfo(entity.getJSONObject("info"),appContext);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task-attempt resource with a trailing slash and verifies the
 * returned JSON; the trailing slash must behave like the bare attempt id.
 */
@Test public void testTaskAttemptIdSlash() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      for ( TaskAttempt att : task.getAttempts().values()) {
        String attid=MRApps.toString(att.getID());
        ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        JSONObject json=response.getEntity(JSONObject.class);
        // Exactly one top-level "taskAttempt" object is expected.
        assertEquals("incorrect number of elements",1,json.length());
        JSONObject info=json.getJSONObject("taskAttempt");
        verifyAMTaskAttempt(info,att,task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task's attempts listing with a trailing slash and verifies the
 * returned JSON; the trailing slash must behave like the bare path.
 */
@Test public void testTaskAttemptsSlash() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      JSONObject json=response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json,task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task-attempt resource as XML, parses the document, and verifies
 * every {@code <taskAttempt>} element against the in-memory attempt.
 */
@Test public void testTaskAttemptIdXML() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      for ( TaskAttempt att : task.getAttempts().values()) {
        String attid=MRApps.toString(att.getID());
        ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
        String xml=response.getEntity(String.class);
        // Parse the XML payload into a DOM for element-level verification.
        DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
        DocumentBuilder db=dbf.newDocumentBuilder();
        InputSource is=new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom=db.parse(is);
        NodeList nodes=dom.getElementsByTagName("taskAttempt");
        for (int i=0; i < nodes.getLength(); i++) {
          Element element=(Element)nodes.item(i);
          verifyAMTaskAttemptXML(element,att,task.getType());
        }
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task's attempts listing as XML and verifies there is exactly one
 * {@code <taskAttempts>} wrapper whose {@code <taskAttempt>} children match
 * the in-memory task.
 */
@Test public void testTaskAttemptsXML() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
      String xml=response.getEntity(String.class);
      // Parse the XML payload into a DOM for element-level verification.
      DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
      DocumentBuilder db=dbf.newDocumentBuilder();
      InputSource is=new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom=db.parse(is);
      NodeList attempts=dom.getElementsByTagName("taskAttempts");
      assertEquals("incorrect number of elements",1,attempts.getLength());
      NodeList nodes=dom.getElementsByTagName("taskAttempt");
      verifyAMTaskAttemptsXML(nodes,task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task's attempts listing as JSON and verifies the payload for
 * every task of every job known to the AM web-services context.
 */
@Test public void testTaskAttempts() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      JSONObject json=response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json,task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task's attempts listing with no Accept header and verifies the
 * response defaults to JSON.
 */
@Test public void testTaskAttemptsDefault() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
      JSONObject json=response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json,task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs the counters resource of every task attempt as JSON and verifies the
 * single top-level {@code jobTaskAttemptCounters} object.
 */
@Test public void testTaskAttemptIdCounters() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      for ( TaskAttempt att : task.getAttempts().values()) {
        String attid=MRApps.toString(att.getID());
        ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        JSONObject json=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,json.length());
        JSONObject info=json.getJSONObject("jobTaskAttemptCounters");
        verifyAMJobTaskAttemptCounters(info,att);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs the counters resource of every task attempt as XML and verifies the
 * {@code <jobTaskAttemptCounters>} elements against the in-memory attempt.
 */
@Test public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      for ( TaskAttempt att : task.getAttempts().values()) {
        String attid=MRApps.toString(att.getID());
        ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).path("counters").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
        String xml=response.getEntity(String.class);
        // Parse the XML payload into a DOM for element-level verification.
        DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
        DocumentBuilder db=dbf.newDocumentBuilder();
        InputSource is=new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom=db.parse(is);
        NodeList nodes=dom.getElementsByTagName("jobTaskAttemptCounters");
        verifyAMTaskCountersXML(nodes,att);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task-attempt resource as JSON and verifies the single top-level
 * {@code taskAttempt} object against the in-memory attempt.
 */
@Test public void testTaskAttemptId() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      for ( TaskAttempt att : task.getAttempts().values()) {
        String attid=MRApps.toString(att.getID());
        ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        JSONObject json=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,json.length());
        JSONObject info=json.getJSONObject("taskAttempt");
        verifyAMTaskAttempt(info,att,task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each task-attempt resource with no Accept header and verifies the
 * response defaults to JSON.
 */
@Test public void testTaskAttemptIdDefault() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    for ( Task task : entry.getValue().getTasks().values()) {
      String tid=MRApps.toString(task.getID());
      for ( TaskAttempt att : task.getAttempts().values()) {
        String attid=MRApps.toString(att.getID());
        ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
        JSONObject json=response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,json.length());
        JSONObject info=json.getJSONObject("taskAttempt");
        verifyAMTaskAttempt(info,att,task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each job's conf resource with no Accept header and verifies the
 * response defaults to JSON.
 */
@Test public void testJobConfDefault() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    JSONObject json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    JSONObject info=json.getJSONObject("conf");
    verifyAMJobConf(info,entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each job's conf resource as JSON and verifies the single top-level
 * {@code conf} object against the in-memory job.
 */
@Test public void testJobConf() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    JSONObject json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    JSONObject info=json.getJSONObject("conf");
    verifyAMJobConf(info,entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each job's conf resource with a trailing slash and verifies it
 * behaves like the bare path.
 */
@Test public void testJobConfSlash() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    JSONObject json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    JSONObject info=json.getJSONObject("conf");
    verifyAMJobConf(info,entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GETs each job's conf resource as XML, parses the document, and verifies
 * the {@code <conf>} elements against the in-memory job.
 */
@Test public void testJobConfXML() throws JSONException, Exception {
  WebResource r=resource();
  // Parameterized map (raw type removed) for type-safe iteration.
  Map<JobId,Job> jobsMap=appContext.getAllJobs();
  for ( Map.Entry<JobId,Job> entry : jobsMap.entrySet()) {
    String jobId=MRApps.toString(entry.getKey());
    ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("conf").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
    String xml=response.getEntity(String.class);
    // Parse the XML payload into a DOM for element-level verification.
    DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
    DocumentBuilder db=dbf.newDocumentBuilder();
    InputSource is=new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom=db.parse(is);
    NodeList info=dom.getElementsByTagName("conf");
    verifyAMJobConfXML(info,entry.getValue());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters/ with no Accept header defaults to a JSON
 * response containing the "jobCounters" object.
 */
@Test
public void testJobCountersDefault() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts as JSON returns the "jobAttempts" object
 * for every job known to the app context.
 */
@Test
public void testJobAttempts() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyJobAttempts(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/ (trailing slash) as JSON returns the single "job"
 * object for every job known to the app context.
 */
@Test
public void testJobIdSlash() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId + "/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    verifyAMJob(info, jobsMap.get(id));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a malformed job id ("job_foo") with an XML Accept header and
 * verifies the 404 RemoteException payload (message, exception type,
 * Java class name) parsed from the XML error body.
 */
@Test
public void testJobIdInvalidXML() throws Exception {
  WebResource r = resource();
  try {
    r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
        .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String msg = response.getEntity(String.class);
    // NOTE(review): dropped a leftover System.out.println(msg) debug print.
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(msg));
    Document dom = db.parse(is);
    NodeList nodes = dom.getElementsByTagName("RemoteException");
    Element element = (Element) nodes.item(0);
    String message = WebServicesTestUtils.getXmlString(element, "message");
    String type = WebServicesTestUtils.getXmlString(element, "exception");
    String classname = WebServicesTestUtils.getXmlString(element, "javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a malformed job id ("job_foo") as JSON and verifies the 404
 * RemoteException payload carries exactly message/exception/javaClassName.
 */
@Test
public void testJobIdInvalid() throws Exception {
  WebResource r = resource();
  try {
    r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters with an XML Accept header returns a parseable
 * XML document rooted at "jobCounters".
 */
@Test
public void testJobCountersXML() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList info = dom.getElementsByTagName("jobCounters");
    verifyAMJobCountersXML(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts with no Accept header defaults to JSON and
 * returns the "jobAttempts" object.
 */
@Test
public void testJobAttemptsDefault() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyJobAttempts(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs with an XML Accept header returns one "jobs" wrapper holding
 * exactly one "job" element, which is then verified against the context.
 */
@Test
public void testJobsXML() throws Exception {
  WebResource webResource = resource();
  ClientResponse response = webResource.path("ws").path("v1")
      .path("mapreduce").path("jobs")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  String entity = response.getEntity(String.class);
  // Parse the raw XML body into a DOM tree for tag-level assertions.
  DocumentBuilder builder =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source = new InputSource();
  source.setCharacterStream(new StringReader(entity));
  Document dom = builder.parse(source);
  NodeList jobsNodes = dom.getElementsByTagName("jobs");
  assertEquals("incorrect number of elements", 1, jobsNodes.getLength());
  NodeList jobNodes = dom.getElementsByTagName("job");
  assertEquals("incorrect number of elements", 1, jobNodes.getLength());
  verifyAMJobXML(jobNodes, appContext);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters as JSON returns the "jobCounters" object for
 * every job known to the app context.
 */
@Test
public void testJobCounters() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * GET jobs/ (trailing slash) as JSON returns a "jobs" wrapper whose first
 * "job" entry matches the corresponding job in the app context.
 */
@Test
public void testJobsSlash() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("mapreduce")
      .path("jobs/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  JSONObject info = arr.getJSONObject(0);
  // Look the job up by the id reported in the response, then cross-check.
  Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
  verifyAMJob(info, job);
}
InternalCallVerifier EqualityVerifier
/**
 * GET jobs with no Accept header defaults to JSON and returns a "jobs"
 * wrapper whose first "job" entry matches the app context.
 */
@Test
public void testJobsDefault() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("mapreduce")
      .path("jobs").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  JSONObject info = arr.getJSONObject(0);
  // Look the job up by the id reported in the response, then cross-check.
  Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
  verifyAMJob(info, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid} with no Accept header defaults to JSON and returns the
 * single "job" object.
 */
@Test
public void testJobIdDefault() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    verifyAMJob(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters/ (trailing slash) as JSON returns the
 * "jobCounters" object.
 */
@Test
public void testJobCountersSlash() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts with an XML Accept header returns one
 * "jobAttempts" wrapper with "jobAttempt" children to verify.
 */
@Test
public void testJobAttemptsXML() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList attempts = dom.getElementsByTagName("jobAttempts");
    assertEquals("incorrect number of elements", 1, attempts.getLength());
    NodeList info = dom.getElementsByTagName("jobAttempt");
    verifyJobAttemptsXML(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid} as JSON returns the single "job" object for every job
 * known to the app context.
 */
@Test
public void testJobId() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("job");
    verifyAMJob(info, jobsMap.get(id));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a malformed job id ("job_foo") with no Accept header; the error
 * defaults to JSON and the 404 RemoteException payload is verified.
 */
@Test
public void testJobIdInvalidDefault() throws Exception {
  WebResource r = resource();
  try {
    r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
        .get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a well-formed but unknown job id ("job_0_1234") and verifies
 * the 404 NotFoundException message, type, and class name.
 */
@Test
public void testJobIdNonExist() throws Exception {
  WebResource r = resource();
  try {
    r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_0_1234")
        .get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: job, job_0_1234, is not found", message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
  }
}
InternalCallVerifier EqualityVerifier
/**
 * GET jobs as JSON returns a "jobs" wrapper whose first "job" entry
 * matches the corresponding job in the app context.
 */
@Test
public void testJobs() throws Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("mapreduce")
      .path("jobs").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  JSONObject info = arr.getJSONObject(0);
  // Look the job up by the id reported in the response, then cross-check.
  Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
  verifyAMJob(info, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/jobattempts/ (trailing slash) as JSON returns the
 * "jobAttempts" object.
 */
@Test
public void testJobAttemptsSlash() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("jobattempts/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyJobAttempts(info, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid} with an XML Accept header returns a parseable XML
 * document whose "job" element is verified against the context.
 */
@Test
public void testJobIdXML() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the typed for-each below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId)
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList job = dom.getElementsByTagName("job");
    verifyAMJobXML(job, appContext);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a completely bogus job id ("bogusfoo") and verifies the 404
 * NotFoundException message, type, and class name.
 */
@Test
public void testJobIdInvalidBogus() throws Exception {
  WebResource r = resource();
  try {
    r.path("ws").path("v1").path("mapreduce").path("jobs").path("bogusfoo")
        .get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject msg = response.getEntity(JSONObject.class);
    JSONObject exception = msg.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    String message = exception.getString("message");
    String type = exception.getString("exception");
    String classname = exception.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: JobId string : bogusfoo is not properly formed",
        message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/{taskid}/counters/ (trailing slash) as JSON
 * returns the "jobTaskCounters" object for every task of every job.
 */
@Test
public void testTaskIdCountersSlash() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("counters/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyAMJobTaskCounters(info, task);
    }
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a task id with an invalid task-type letter ("d") and verifies
 * the 404 NotFoundException payload of the JSON error response.
 */
@Test
public void testTaskIdInvalid() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the typed for-each below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_d_000000";
    try {
      r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
          .path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Bad TaskType identifier. TaskId string : "
              + "task_0_0000_d_000000 is not properly formed.", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/{taskid}/ (trailing slash) as JSON returns the
 * single "task" object for every task of every job.
 */
@Test
public void testTaskIdSlash() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid + "/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyAMSingleTask(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks?type=m filters the task list to map tasks only;
 * exactly one map task is expected per job in this fixture.
 */
@Test
public void testTasksQueryMap() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String type = "m";
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks").queryParam("type", type)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 1, arr.length());
    verifyAMTask(arr, jobsMap.get(id), type);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a completely bogus task id ("bogustaskid") and verifies the
 * 404 NotFoundException payload of the JSON error response.
 */
@Test
public void testTaskIdBogus() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the typed for-each below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "bogustaskid";
    try {
      r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
          .path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "bogustaskid is not properly formed", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * GET jobs/{jobid}/tasks?type=reduce (only "m" or "r" are valid) must
 * produce a 400 BadRequestException with the expected JSON payload.
 */
@Test
public void testTasksQueryInvalid() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the typed for-each below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tasktype = "reduce";
    try {
      r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
          .path("tasks").queryParam("type", tasktype)
          .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: tasktype must be either m or r", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
    }
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a well-formed but unknown task id ("task_0_0000_m_000000") and
 * verifies the 404 NotFoundException payload of the JSON error response.
 */
@Test
public void testTaskIdNonExist() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the typed for-each below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_m_000000";
    try {
      r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
          .path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: task not found with id task_0_0000_m_000000",
          message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/{taskid}/counters with an XML Accept header
 * returns a parseable XML document rooted at "jobTaskCounters".
 */
@Test
public void testJobTaskCountersXML() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder db = dbf.newDocumentBuilder();
      InputSource is = new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom = db.parse(is);
      NodeList info = dom.getElementsByTagName("jobTaskCounters");
      verifyAMTaskCountersXML(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks with an XML Accept header returns one "tasks"
 * wrapper whose "task" children are verified against the job.
 */
@Test
public void testTasksXML() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList tasks = dom.getElementsByTagName("tasks");
    assertEquals("incorrect number of elements", 1, tasks.getLength());
    NodeList task = dom.getElementsByTagName("task");
    verifyAMTaskXML(task, jobsMap.get(id));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/{taskid}/counters as JSON returns the
 * "jobTaskCounters" object for every task of every job.
 */
@Test
public void testTaskIdCounters() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyAMJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks as JSON returns a "tasks" wrapper with both
 * tasks (one map, one reduce in this fixture) per job.
 */
@Test
public void testTasks() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    // null type means "no filter": verify all tasks.
    verifyAMTask(arr, jobsMap.get(id), null);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/ (trailing slash) as JSON returns the same
 * two-task "tasks" wrapper as the non-slash form.
 */
@Test
public void testTasksSlash() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    // null type means "no filter": verify all tasks.
    verifyAMTask(arr, jobsMap.get(id), null);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks with no Accept header defaults to JSON and
 * returns the two-task "tasks" wrapper.
 */
@Test
public void testTasksDefault() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("tasks").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 2, arr.length());
    // null type means "no filter": verify all tasks.
    verifyAMTask(arr, jobsMap.get(id), null);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/{taskid} as JSON returns the single "task"
 * object for every task of every job.
 */
@Test
public void testTaskId() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid)
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyAMSingleTask(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/tasks/{taskid} with no Accept header defaults to JSON
 * and returns the single "task" object.
 */
@Test
public void testTaskIdDefault() throws Exception {
  WebResource r = resource();
  // Typed map restored: the raw Map breaks the for-each and get() below.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyAMSingleTask(info, task);
    }
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A malformed task id ("task_0_0000_m" is missing its numeric suffix)
 * must produce a 404 whose JSON body wraps a NotFoundException as a
 * three-field RemoteException.
 */
@Test public void testTaskIdInvalid3() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  final String badTaskId = "task_0_0000_m";
  for (JobId jobIdObj : jobsMap.keySet()) {
    String jobIdStr = MRApps.toString(jobIdObj);
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobIdStr).path("tasks").path(badTaskId)
          .get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject exception =
          response.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "task_0_0000_m is not properly formed",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Fetches .../tasks/{taskid}/counters without an Accept header; the
 * default response must be JSON containing a single "jobTaskCounters"
 * object, verified per task by verifyAMJobTaskCounters.
 */
@Test public void testTaskIdCountersDefault() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId jobIdObj : jobsMap.keySet()) {
    String jobIdStr = MRApps.toString(jobIdObj);
    for (Task task : jobsMap.get(jobIdObj).getTasks().values()) {
      String taskIdStr = MRApps.toString(task.getID());
      ClientResponse response = webResource.path("ws").path("v1")
          .path("mapreduce").path("jobs").path(jobIdStr)
          .path("tasks").path(taskIdStr).path("counters")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      verifyAMJobTaskCounters(json.getJSONObject("jobTaskCounters"), task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * The tasks endpoint filtered with ?type=r must return exactly the
 * reduce tasks of each job (this fixture has one reduce task per job).
 */
@Test public void testTasksQueryReduce() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  final String taskType = "r";
  for (JobId jobIdObj : jobsMap.keySet()) {
    String jobIdStr = MRApps.toString(jobIdObj);
    ClientResponse response = webResource.path("ws").path("v1")
        .path("mapreduce").path("jobs").path(jobIdStr).path("tasks")
        .queryParam("type", taskType)
        .accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONArray arr = json.getJSONObject("tasks").getJSONArray("task");
    assertEquals("incorrect number of elements", 1, arr.length());
    verifyAMTask(arr, jobsMap.get(jobIdObj), taskType);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A task id with the type token in the wrong position
 * ("task_0_m_000000") must produce a 404 whose JSON body wraps a
 * NotFoundException as a three-field RemoteException.
 */
@Test public void testTaskIdInvalid2() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  final String badTaskId = "task_0_m_000000";
  for (JobId jobIdObj : jobsMap.keySet()) {
    String jobIdStr = MRApps.toString(jobIdObj);
    try {
      webResource.path("ws").path("v1").path("mapreduce").path("jobs")
          .path(jobIdStr).path("tasks").path(badTaskId)
          .get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject exception =
          response.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: TaskId string : "
              + "task_0_m_000000 is not properly formed",
          exception.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", exception.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException",
          exception.getString("javaClassName"));
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * XML variant of the single-task endpoint: each task is fetched with
 * Accept: application/xml, the payload is parsed with the JAXP DOM
 * parser, and every &lt;task&gt; element is verified.
 */
@Test public void testTaskIdXML() throws JSONException, Exception {
  WebResource webResource = resource();
  Map jobsMap = appContext.getAllJobs();
  for (JobId jobIdObj : jobsMap.keySet()) {
    String jobIdStr = MRApps.toString(jobIdObj);
    for (Task task : jobsMap.get(jobIdObj).getTasks().values()) {
      String taskIdStr = MRApps.toString(task.getID());
      ClientResponse response = webResource.path("ws").path("v1")
          .path("mapreduce").path("jobs").path(jobIdStr)
          .path("tasks").path(taskIdStr)
          .accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      // InputSource(Reader) is equivalent to setCharacterStream on a
      // default-constructed InputSource.
      InputSource source = new InputSource(new StringReader(xml));
      NodeList nodes = builder.parse(source).getElementsByTagName("task");
      for (int i = 0; i < nodes.getLength(); i++) {
        verifyAMSingleTaskXML((Element) nodes.item(i), task);
      }
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'singleJobCounter': invoking the controller action must
 * select {@link SingleCounterPage} as the class used for rendering.
 */
@Test public void testGetSingleJobCounter() throws IOException {
appController.singleJobCounter();
assertEquals(SingleCounterPage.class,appController.getClazz());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Test method 'singleTaskCounter': must select {@link SingleCounterPage}
 * for rendering and populate both counter coordinates in the controller
 * properties.
 */
@Test public void testGetSingleTaskCounter() throws IOException {
appController.singleTaskCounter();
assertEquals(SingleCounterPage.class,appController.getClazz());
// the action is expected to have stored the counter group and name
assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'job'. Should print message about error or set JobPage class for rendering.
 * Walks through three scenarios in order: access denied, missing job id,
 * and the success path.
 */
@Test public void testGetJob(){
// 1) access denied: the controller writes a text error message
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.job();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// 2) access granted but job id removed: getData() accumulates output, so
//    the denied message from step 1 is still present in the expectation
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.JOB_ID);
appController.job();
assertEquals("Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",appController.getData());
// 3) job id restored: the JobPage class is selected for rendering
appController.getProperty().put(AMParams.JOB_ID,"job_01_01");
appController.job();
assertEquals(JobPage.class,appController.getClazz());
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'tasks': invoking the controller action must select
 * {@link TasksPage} as the class used for rendering.
 */
@Test public void testTasks(){
appController.tasks();
assertEquals(TasksPage.class,appController.getClazz());
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'taskCounters'. Should print message about error or set CountersPage class for rendering.
 * Walks through access-denied, missing-task-id and success scenarios in order.
 */
@Test public void testGetTaskCounters(){
// 1) access denied: a text error message is written
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.taskCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// 2) access granted but task id removed: getData() accumulates output,
//    so the denied message from step 1 remains in the expectation
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.taskCounters();
assertEquals("Access denied: User user does not have permission to view job job_01_01missing task ID",appController.getData());
// 3) task id restored: the CountersPage class is selected for rendering
appController.getProperty().put(AMParams.TASK_ID,"task_01_01_m01_01");
appController.taskCounters();
assertEquals(CountersPage.class,appController.getClazz());
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'conf': invoking the controller action must select
 * {@link JobConfPage} as the class used for rendering.
 */
@Test public void testConfiguration(){
appController.conf();
assertEquals(JobConfPage.class,appController.getClazz());
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'attempts'. Should set AttemptsPage class for rendering or print
 * information about the error. Exercises, in order: access denied, missing task
 * id, missing task type, missing attempt state, then the success path.
 */
@Test public void testAttempts(){
appController.getProperty().remove(AMParams.TASK_TYPE);
// 1) access denied: a text error message is written
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.attempts();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// 2) access granted but no task id: data is unchanged from step 1
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.attempts();
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// 3) task id present but task type still missing: error goes to the title
appController.getProperty().put(AMParams.TASK_ID,"task_01_01_m01_01");
appController.attempts();
assertEquals("Bad request: missing task-type.",appController.getProperty().get("title"));
// 4) task type present but attempt state missing
appController.getProperty().put(AMParams.TASK_TYPE,"m");
appController.attempts();
assertEquals("Bad request: missing attempt-state.",appController.getProperty().get("title"));
// 5) everything present: the AttemptsPage class is selected for rendering
appController.getProperty().put(AMParams.ATTEMPT_STATE,"State");
appController.attempts();
assertEquals(AttemptsPage.class,appController.getClazz());
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'task': must store an "Attempts for ..." title derived from
 * the task id held in the controller properties and select
 * {@link TaskPage} as the class used for rendering.
 */
@Test public void testTask(){
appController.task();
assertEquals("Attempts for task_01_01_m01_01",appController.getProperty().get("title"));
assertEquals(TaskPage.class,appController.getClazz());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test the method 'info': the controller must publish, in order, the
 * application id, application name, user, start time and elapsed time as
 * ResponseInfo items.
 */
@Test public void testInfo(){
appController.info();
Iterator iterator=appController.getResponseInfo().iterator();
ResponseInfo.Item item=iterator.next();
assertEquals("Application ID:",item.key);
assertEquals("application_0_0000",item.value);
item=iterator.next();
assertEquals("Application Name:",item.key);
assertEquals("AppName",item.value);
item=iterator.next();
assertEquals("User:",item.key);
assertEquals("User",item.value);
item=iterator.next();
// value is a timestamp, so only the key is checked here
assertEquals("Started on:",item.key);
item=iterator.next();
// NOTE(review): "Elasped" is misspelled, but the expectation presumably
// mirrors the key emitted by AppController.info(); fix production first,
// then update this string in lock-step.
assertEquals("Elasped: ",item.key);
}
InternalCallVerifier EqualityVerifier
/**
 * Test method 'jobCounters'. Should print message about error or set CountersPage class for rendering.
 * Walks through access-denied, missing-job-id and success scenarios in order.
 */
@Test public void testGetJobCounters(){
// 1) access denied: a text error message is written
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false);
appController.jobCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData());
// 2) access granted but job id removed: getData() accumulates output, so
//    the denied message from step 1 remains in the expectation
when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
appController.getProperty().remove(AMParams.JOB_ID);
appController.jobCounters();
assertEquals("Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",appController.getData());
// 3) job id restored: the CountersPage class is selected for rendering
appController.getProperty().put(AMParams.JOB_ID,"job_01_01");
appController.jobCounters();
assertEquals(CountersPage.class,appController.getClazz());
}
InternalCallVerifier BooleanVerifier
/**
 * A completed task's report must expose the start time of its earliest
 * attempt (t=10), not whichever attempt happens to be seen last.
 */
@Test(timeout=5000) public void testTaskStartTimes(){
  TaskId taskId = mock(TaskId.class);
  TaskInfo taskInfo = mock(TaskInfo.class);
  Map taskAttempts = new TreeMap();

  // First attempt starts at t=10. Upper-case 'L' suffix: the lowercase
  // 'l' in the original is easily misread as the digit '1'.
  TaskAttemptID id = new TaskAttemptID("0", 0, TaskType.MAP, 0, 0);
  TaskAttemptInfo info = mock(TaskAttemptInfo.class);
  when(info.getAttemptId()).thenReturn(id);
  when(info.getStartTime()).thenReturn(10L);
  taskAttempts.put(id, info);

  // Second attempt starts later, at t=20.
  id = new TaskAttemptID("1", 0, TaskType.MAP, 1, 1);
  info = mock(TaskAttemptInfo.class);
  when(info.getAttemptId()).thenReturn(id);
  when(info.getStartTime()).thenReturn(20L);
  taskAttempts.put(id, info);

  when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
  CompletedTask task = new CompletedTask(taskId, taskInfo);
  TaskReport report = task.getReport();

  // assertEquals reports expected/actual on failure, unlike the original
  // assertTrue(report.getStartTime() == 10) which only says "false".
  assertEquals(10, report.getStartTime());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test some methods of CompletedTaskAttempt: rack name, phase, finished
 * flag, shuffle/sort finish times and shuffle port are all taken from
 * the mocked TaskAttemptInfo.
 */
@Test(timeout=5000) public void testCompletedTaskAttempt(){
  TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
  when(attemptInfo.getRackname()).thenReturn("Rackname");
  when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
  when(attemptInfo.getSortFinishTime()).thenReturn(12L);
  when(attemptInfo.getShufflePort()).thenReturn(10);
  // Build attempt 0 of reduce task 0 of job 12345/0.
  TaskAttemptID attemptId =
      new TaskAttemptID(new TaskID(new JobID("12345", 0), TaskType.REDUCE, 0), 0);
  when(attemptInfo.getAttemptId()).thenReturn(attemptId);

  CompletedTaskAttempt attempt = new CompletedTaskAttempt(null, attemptInfo);

  assertEquals("Rackname", attempt.getNodeRackName());
  assertEquals(Phase.CLEANUP, attempt.getPhase());
  assertTrue(attempt.isFinished());
  assertEquals(11L, attempt.getShuffleFinishTime());
  assertEquals(12L, attempt.getSortFinishTime());
  assertEquals(10, attempt.getShufflePort());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Directory creation should block while HDFS is in safe mode and then
 * succeed once a helper thread takes the filesystem out of safe mode
 * before the timeout elapses.
 */
@Test public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  new Thread(){
    @Override public void run(){
      try {
        Thread.sleep(500);
        dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
        // BUGFIX: after SAFEMODE_LEAVE the cluster must NOT be in safe
        // mode; the original asserted assertTrue here, contradicting the
        // action performed on the previous line.
        // NOTE(review): an assertion failure inside this helper thread
        // does not fail the JUnit test by itself — consider propagating
        // it to the main thread.
        Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
      }
      catch ( Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  }
  .start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0),new SystemClock());
}
UtilityVerifier InternalCallVerifier BooleanVerifier ExceptionVerifier HybridVerifier
/**
 * If the filesystem never leaves safe mode, directory creation must give
 * up with a YarnRuntimeException once the (controlled) clock is advanced
 * past the wait timeout by a helper thread.
 */
@Test(expected=YarnRuntimeException.class) public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
  Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
  final ControlledClock clock = new ControlledClock(new SystemClock());
  clock.setTime(1);
  Thread timeoutTrigger = new Thread() {
    @Override public void run() {
      try {
        Thread.sleep(500);
        // Jump the clock well past the wait timeout.
        clock.setTime(3000);
      } catch (Exception ex) {
        Assert.fail(ex.toString());
      }
    }
  };
  timeoutTrigger.start();
  testCreateHistoryDirs(dfsCluster.getConfiguration(0), clock);
}
InternalCallVerifier BooleanVerifier
/**
 * With the filesystem out of safe mode, history directory creation must
 * succeed on the first attempt (expected outcome: true).
 */
@Test public void testCreateDirsWithFileSystem() throws Exception {
dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
testTryCreateHistoryDirs(dfsCluster.getConfiguration(0),true);
}
InternalCallVerifier BooleanVerifier
/**
 * While the filesystem is in safe mode, a single attempt at creating the
 * history directories must report failure (expected outcome: false).
 */
@Test public void testCreateDirsWithFileSystemInSafeMode() throws Exception {
dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
testTryCreateHistoryDirs(dfsCluster.getConfiguration(0),false);
}
InternalCallVerifier BooleanVerifier
/**
 * With fs.defaultFS pointed at the first cluster via a generated
 * core-site file, the history directories must be created on that
 * default filesystem even though the JHS config comes from the second
 * cluster — and must NOT appear on the second cluster.
 */
@Test public void testCreateDirsWithAdditionalFileSystem() throws Exception {
  dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  dfsCluster2.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
  Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
  Assert.assertFalse(dfsCluster2.getFileSystem().isInSafeMode());

  // Point fs.defaultFS at the first cluster through a core-site file.
  Configuration conf = new Configuration(false);
  conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, dfsCluster.getURI().toString());
  // BUGFIX: close the stream in a finally block — the original leaked
  // the FileOutputStream if writeXml threw.
  FileOutputStream os = new FileOutputStream(coreSitePath);
  try {
    conf.writeXml(os);
  } finally {
    os.close();
  }

  testTryCreateHistoryDirs(dfsCluster2.getConfiguration(0), true);

  // The directories must exist on the default (first) cluster only.
  Assert.assertTrue(dfsCluster.getFileSystem().exists(new Path(getDoneDirNameForTest())));
  Assert.assertTrue(dfsCluster.getFileSystem().exists(new Path(getIntermediateDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem().exists(new Path(getDoneDirNameForTest())));
  Assert.assertFalse(dfsCluster2.getFileSystem().exists(new Path(getIntermediateDoneDirNameForTest())));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Injects a rename failure for token "update" tmp files: updateToken()
 * must surface the injected IOException, yet a subsequent recovery from
 * the same state-store directory must still load the token with the
 * updated renew date (per the assertions below).
 */
@Test public void testUpdatedTokenRecovery() throws IOException {
IOException intentionalErr=new IOException("intentional error");
FileSystem fs=FileSystem.getLocal(conf);
final FileSystem spyfs=spy(fs);
// Matches any Path whose file name starts with "update".
ArgumentMatcher updateTmpMatcher=new ArgumentMatcher(){
@Override public boolean matches( Object argument){
if (argument instanceof Path) {
return ((Path)argument).getName().startsWith("update");
}
return false;
}
}
;
// Fail only the rename of update tmp files; all other renames succeed.
doThrow(intentionalErr).when(spyfs).rename(argThat(updateTmpMatcher),isA(Path.class));
conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,testDir.getAbsoluteFile().toURI().toString());
// State store backed by the failure-injecting spy filesystem.
HistoryServerStateStoreService store=new HistoryServerFileSystemStateStoreService(){
@Override FileSystem createFileSystem() throws IOException {
return spyfs;
}
}
;
store.init(conf);
store.start();
final MRDelegationTokenIdentifier token1=new MRDelegationTokenIdentifier(new Text("tokenOwner1"),new Text("tokenRenewer1"),new Text("tokenUser1"));
token1.setSequenceNumber(1);
final Long tokenDate1=1L;
store.storeToken(token1,tokenDate1);
final Long newTokenDate1=975318642L;
try {
store.updateToken(token1,newTokenDate1);
fail("intentional error not thrown");
}
catch ( IOException e) {
// The exact injected exception must propagate, not a wrapper.
assertEquals(intentionalErr,e);
}
store.close();
// Recover with a fresh (non-faulty) store over the same directory.
store=createAndStartStore();
HistoryServerState state=store.loadState();
assertEquals("incorrect loaded token count",1,state.tokenState.size());
assertTrue("missing token 1",state.tokenState.containsKey(token1));
assertEquals("incorrect token 1 date",newTokenDate1,state.tokenState.get(token1));
store.close();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end recovery test for the JHS delegation-token secret manager:
 * master keys and tokens must survive a stop/recover cycle, sequence
 * numbers must resume where they left off, cancellation rules must be
 * enforced for Kerberos principals, and cancelled tokens must stay gone
 * after a second recovery.
 */
@Test public void testRecovery() throws IOException {
  Configuration conf = new Configuration();
  HistoryServerStateStoreService store = new HistoryServerMemStateStoreService();
  store.init(conf);
  store.start();
  JHSDelegationTokenSecretManagerForTest mgr =
      new JHSDelegationTokenSecretManagerForTest(store);
  mgr.startThreads();

  MRDelegationTokenIdentifier tokenId1 = new MRDelegationTokenIdentifier(
      new Text("tokenOwner"), new Text("tokenRenewer"), new Text("tokenUser"));
  Token token1 = new Token(tokenId1, mgr);
  MRDelegationTokenIdentifier tokenId2 = new MRDelegationTokenIdentifier(
      new Text("tokenOwner"), new Text("tokenRenewer"), new Text("tokenUser"));
  Token token2 = new Token(tokenId2, mgr);
  DelegationKey[] keys = mgr.getAllKeys();
  long tokenRenewDate1 = mgr.getAllTokens().get(tokenId1).getRenewDate();
  long tokenRenewDate2 = mgr.getAllTokens().get(tokenId2).getRenewDate();
  mgr.stopThreads();

  // Simulate a restart: a fresh manager recovers from the same store.
  mgr = new JHSDelegationTokenSecretManagerForTest(store);
  mgr.recover(store.loadState());
  List recoveredKeys = Arrays.asList(mgr.getAllKeys());
  for (DelegationKey key : keys) {
    assertTrue("key missing after recovery", recoveredKeys.contains(key));
  }
  assertTrue("token1 missing", mgr.getAllTokens().containsKey(tokenId1));
  assertEquals("token1 renew date", tokenRenewDate1,
      mgr.getAllTokens().get(tokenId1).getRenewDate());
  assertTrue("token2 missing", mgr.getAllTokens().containsKey(tokenId2));
  assertEquals("token2 renew date", tokenRenewDate2,
      mgr.getAllTokens().get(tokenId2).getRenewDate());
  mgr.startThreads();
  mgr.verifyToken(tokenId1, token1.getPassword());
  mgr.verifyToken(tokenId2, token2.getPassword());

  // Sequence numbers must continue from the recovered state.
  MRDelegationTokenIdentifier tokenId3 = new MRDelegationTokenIdentifier(
      new Text("tokenOwner"), new Text("tokenRenewer"), new Text("tokenUser"));
  Token token3 = new Token(tokenId3, mgr);
  assertEquals("sequence number restore", tokenId2.getSequenceNumber() + 1,
      tokenId3.getSequenceNumber());
  mgr.cancelToken(token1, "tokenOwner");

  // A token owned by a full principal must not be cancellable by the
  // bare short name (KerberosName rules installed below).
  MRDelegationTokenIdentifier tokenIdFull = new MRDelegationTokenIdentifier(
      new Text("tokenOwner/localhost@LOCALHOST"), new Text("tokenRenewer"),
      new Text("tokenUser"));
  KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]");
  Token tokenFull = new Token(tokenIdFull, mgr);
  try {
    mgr.cancelToken(tokenFull, "tokenOwner");
    // BUGFIX: the original had no fail() here, so the test silently
    // passed even if the unauthorized cancel was wrongly allowed.
    fail("expected AccessControlException when cancelling with short name");
  } catch (AccessControlException ace) {
    assertTrue(ace.getMessage().contains("is not authorized to cancel the token"));
  }
  mgr.cancelToken(tokenFull, tokenIdFull.getOwner().toString());

  long tokenRenewDate3 = mgr.getAllTokens().get(tokenId3).getRenewDate();
  mgr.stopThreads();

  // Second restart: the cancelled token1 must not reappear.
  mgr = new JHSDelegationTokenSecretManagerForTest(store);
  mgr.recover(store.loadState());
  assertFalse("token1 should be missing", mgr.getAllTokens().containsKey(tokenId1));
  assertTrue("token2 missing", mgr.getAllTokens().containsKey(tokenId2));
  assertEquals("token2 renew date", tokenRenewDate2,
      mgr.getAllTokens().get(tokenId2).getRenewDate());
  assertTrue("token3 missing", mgr.getAllTokens().containsKey(tokenId3));
  assertEquals("token3 renew date", tokenRenewDate3,
      mgr.getAllTokens().get(tokenId3).getRenewDate());
  mgr.startThreads();
  mgr.verifyToken(tokenId2, token2.getPassword());
  mgr.verifyToken(tokenId3, token3.getPassword());
  mgr.stopThreads();
}
IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * The loaded-job cache must evict the oldest entry once its configured
 * size is exceeded, and refreshLoadedJobCache() must pick up a larger
 * size from a refreshed configuration so that all jobs then fit.
 */
@Test public void testRefreshLoadedJobCache() throws Exception {
HistoryFileManager historyManager=mock(HistoryFileManager.class);
jobHistory=spy(new JobHistory());
doReturn(historyManager).when(jobHistory).createHistoryFileManager();
Configuration conf=new Configuration();
// cache initially holds only 2 jobs
conf.set(JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE,"2");
jobHistory.init(conf);
jobHistory.start();
CachedHistoryStorage storage=spy((CachedHistoryStorage)jobHistory.getHistoryStorage());
Job[] jobs=new Job[3];
JobId[] jobIds=new JobId[3];
for (int i=0; i < 3; i++) {
jobs[i]=mock(Job.class);
jobIds[i]=mock(JobId.class);
when(jobs[i].getID()).thenReturn(jobIds[i]);
}
HistoryFileInfo fileInfo=mock(HistoryFileInfo.class);
when(historyManager.getFileInfo(any(JobId.class))).thenReturn(fileInfo);
// consecutive loads yield jobs[0], jobs[1], jobs[2] in order
when(fileInfo.loadJob()).thenReturn(jobs[0]).thenReturn(jobs[1]).thenReturn(jobs[2]);
// loading 3 jobs into a size-2 cache should evict jobs[0]
for (int i=0; i < 3; i++) {
storage.getFullJob(jobs[i].getID());
}
Map jobCache=storage.getLoadedJobCache();
assertFalse(jobCache.containsKey(jobs[0].getID()));
assertTrue(jobCache.containsKey(jobs[1].getID()) && jobCache.containsKey(jobs[2].getID()));
// grow the cache to 3 and refresh; all 3 jobs should now fit
conf.set(JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE,"3");
doReturn(conf).when(storage).createConf();
when(fileInfo.loadJob()).thenReturn(jobs[0]).thenReturn(jobs[1]).thenReturn(jobs[2]);
jobHistory.refreshLoadedJobCache();
for (int i=0; i < 3; i++) {
storage.getFullJob(jobs[i].getID());
}
jobCache=storage.getLoadedJobCache();
for (int i=0; i < 3; i++) {
assertTrue(jobCache.containsKey(jobs[i].getID()));
}
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises the history cleaner's retention refresh: with a 24h max age
 * only yesterday's done-dir entry is purged; after
 * refreshJobRetentionSettings() lowers the max age and cleaner interval,
 * the remaining (today's) entry is purged as well.
 */
@Test public void testRefreshJobRetentionSettings() throws IOException, InterruptedException {
String root="mockfs://foo/";
String historyDoneDir=root + "mapred/history/done";
long now=System.currentTimeMillis();
// 25h ago: outside the initial 24h retention window
long someTimeYesterday=now - (25l * 3600 * 1000);
// 200s ago: inside the initial window, outside the 10s window set later
long timeBefore200Secs=now - (200l * 1000);
String timestampComponent=JobHistoryUtils.timestampDirectoryComponent(someTimeYesterday);
Path donePathYesterday=new Path(historyDoneDir,timestampComponent + "/" + "000000");
FileStatus dirCreatedYesterdayStatus=new FileStatus(0,true,0,0,someTimeYesterday,donePathYesterday);
timestampComponent=JobHistoryUtils.timestampDirectoryComponent(timeBefore200Secs);
Path donePathToday=new Path(historyDoneDir,timestampComponent + "/" + "000000");
FileStatus dirCreatedTodayStatus=new FileStatus(0,true,0,0,timeBefore200Secs,donePathToday);
Path fileUnderYesterdayDir=new Path(donePathYesterday.toString(),"job_1372363578825_0015-" + someTimeYesterday + "-user-Sleep+job-"+ someTimeYesterday+ "-1-1-SUCCEEDED-default.jhist");
FileStatus fileUnderYesterdayDirStatus=new FileStatus(10,false,0,0,someTimeYesterday,fileUnderYesterdayDir);
Path fileUnderTodayDir=new Path(donePathYesterday.toString(),"job_1372363578825_0016-" + timeBefore200Secs + "-user-Sleep+job-"+ timeBefore200Secs+ "-1-1-SUCCEEDED-default.jhist");
FileStatus fileUnderTodayDirStatus=new FileStatus(10,false,0,0,timeBefore200Secs,fileUnderTodayDir);
HistoryFileManager historyManager=spy(new HistoryFileManager());
jobHistory=spy(new JobHistory());
List fileStatusList=new LinkedList();
fileStatusList.add(dirCreatedYesterdayStatus);
fileStatusList.add(dirCreatedTodayStatus);
doReturn(4).when(jobHistory).getInitDelaySecs();
doReturn(historyManager).when(jobHistory).createHistoryFileManager();
// stub directory scans so each done-dir exposes exactly one jhist file
List list1=new LinkedList();
list1.add(fileUnderYesterdayDirStatus);
doReturn(list1).when(historyManager).scanDirectoryForHistoryFiles(eq(donePathYesterday),any(FileContext.class));
List list2=new LinkedList();
list2.add(fileUnderTodayDirStatus);
doReturn(list2).when(historyManager).scanDirectoryForHistoryFiles(eq(donePathToday),any(FileContext.class));
doReturn(fileStatusList).when(historyManager).getHistoryDirsForCleaning(Mockito.anyLong());
doReturn(true).when(historyManager).deleteDir(any(FileStatus.class));
JobListCache jobListCache=mock(JobListCache.class);
HistoryFileInfo fileInfo=mock(HistoryFileInfo.class);
doReturn(jobListCache).when(historyManager).createJobListCache();
when(jobListCache.get(any(JobId.class))).thenReturn(fileInfo);
doNothing().when(fileInfo).delete();
Configuration conf=new Configuration();
conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,24l * 3600 * 1000);
conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,2 * 1000);
jobHistory.init(conf);
jobHistory.start();
assertEquals(2 * 1000l,jobHistory.getCleanerInterval());
// only yesterday's file is older than 24h, so exactly one delete
verify(fileInfo,timeout(20000).times(1)).delete();
fileStatusList.remove(dirCreatedYesterdayStatus);
// shrink retention to 10s and cleaner interval to 1s, then refresh
conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,10 * 1000);
conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,1 * 1000);
doReturn(conf).when(jobHistory).createConf();
jobHistory.refreshJobRetentionSettings();
assertEquals(1 * 1000l,jobHistory.getCleanerInterval());
// today's file now also exceeds the (10s) max age -> second delete
verify(fileInfo,timeout(20000).times(2)).delete();
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Simple test of some methods of CompletedJob: completion-event paging
 * and ordering, plus name/queue/progress/diagnostics/ACL accessors.
 * @throws Exception
 */
@Test(timeout=30000) public void testGetTaskAttemptCompletionEvent() throws Exception {
HistoryFileInfo info=mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager);
TaskCompletionEvent[] events=completedJob.getMapAttemptCompletionEvents(0,1000);
// paging: a window of 10 from offset 0 returns exactly 10 events
assertEquals(10,completedJob.getMapAttemptCompletionEvents(0,10).length);
int currentEventId=0;
// event ids must come back in non-decreasing order
for ( TaskCompletionEvent taskAttemptCompletionEvent : events) {
int eventId=taskAttemptCompletionEvent.getEventId();
assertTrue(eventId >= currentEventId);
currentEventId=eventId;
}
// NOTE(review): null is the expected result here — presumably the conf
// file is not loadable in this fixture; confirm against CompletedJob.
assertNull(completedJob.loadConfFile());
assertEquals("Sleep job",completedJob.getName());
assertEquals("default",completedJob.getQueueName());
assertEquals(1.0,completedJob.getProgress(),0.001);
// 12 attempts total (10 maps + 2 reduces); paging windows checked below
assertEquals(12,completedJob.getTaskAttemptCompletionEvents(0,1000).length);
assertEquals(10,completedJob.getTaskAttemptCompletionEvents(0,10).length);
assertEquals(7,completedJob.getTaskAttemptCompletionEvents(5,10).length);
assertEquals(1,completedJob.getDiagnostics().size());
assertEquals("",completedJob.getDiagnostics().get(0));
assertEquals(0,completedJob.getJobACLs().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Loads a completed job from history and checks one map and one reduce
 * task: attempt counts, final state, and the generated TaskReport.
 */
@Test(timeout=10000) public void testCompletedTask() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user", info, jobAclsManager);
  TaskId mapTaskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId reduceTaskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);

  Map mapTasks = completedJob.getTasks(TaskType.MAP);
  Map reduceTasks = completedJob.getTasks(TaskType.REDUCE);
  assertEquals(10, mapTasks.size());
  assertEquals(2, reduceTasks.size());

  Task mapTask = mapTasks.get(mapTaskId);
  assertEquals(1, mapTask.getAttempts().size());
  assertEquals(TaskState.SUCCEEDED, mapTask.getState());
  TaskReport mapReport = mapTask.getReport();
  assertEquals(TaskState.SUCCEEDED, mapReport.getTaskState());
  assertEquals(mapTaskId, mapReport.getTaskId());

  Task reduceTask = reduceTasks.get(reduceTaskId);
  assertEquals(1, reduceTask.getAttempts().size());
  assertEquals(TaskState.SUCCEEDED, reduceTask.getState());
  TaskReport reduceReport = reduceTask.getReport();
  assertEquals(TaskState.SUCCEEDED, reduceReport.getTaskState());
  assertEquals(reduceTaskId, reduceReport.getTaskId());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * For one map attempt and one reduce attempt of a history-loaded job,
 * verifies the final state, container/http addresses, and the node
 * manager coordinates in the TaskAttemptReport.
 */
@Test(timeout=10000) public void testCompletedTaskAttempt() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user", info, jobAclsManager);
  TaskId mapTaskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  TaskId reduceTaskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  TaskAttemptId mapAttemptId = MRBuilderUtils.newTaskAttemptId(mapTaskId, 0);
  TaskAttemptId reduceAttemptId = MRBuilderUtils.newTaskAttemptId(reduceTaskId, 0);

  TaskAttempt mapAttempt = completedJob.getTask(mapTaskId).getAttempt(mapAttemptId);
  assertEquals(TaskAttemptState.SUCCEEDED, mapAttempt.getState());
  assertEquals("localhost:45454", mapAttempt.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", mapAttempt.getNodeHttpAddress());
  TaskAttemptReport mapReport = mapAttempt.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, mapReport.getTaskAttemptState());
  assertEquals("localhost", mapReport.getNodeManagerHost());
  assertEquals(45454, mapReport.getNodeManagerPort());
  assertEquals(9999, mapReport.getNodeManagerHttpPort());

  TaskAttempt reduceAttempt = completedJob.getTask(reduceTaskId).getAttempt(reduceAttemptId);
  assertEquals(TaskAttemptState.SUCCEEDED, reduceAttempt.getState());
  assertEquals("localhost:45454", reduceAttempt.getAssignedContainerMgrAddress());
  assertEquals("localhost:9999", reduceAttempt.getNodeHttpAddress());
  TaskAttemptReport reduceReport = reduceAttempt.getReport();
  assertEquals(TaskAttemptState.SUCCEEDED, reduceReport.getTaskAttemptState());
  assertEquals("localhost", reduceReport.getNodeManagerHost());
  assertEquals(45454, reduceReport.getNodeManagerPort());
  assertEquals(9999, reduceReport.getNodeManagerHttpPort());
}
InternalCallVerifier EqualityVerifier
/**
 * Sanity checks on a CompletedJob loaded from history: AM info count,
 * map/reduce counts, task totals, user, state, and the derived
 * JobReport.
 */
@Test(timeout=100000) public void testCompletedJob() throws Exception {
  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);
  completedJob =
      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user", info, jobAclsManager);
  // immediately after construction, tasksLoaded mirrors the loadTasks flag
  assertEquals(loadTasks, completedJob.tasksLoaded.get());
  assertEquals(1, completedJob.getAMInfos().size());
  assertEquals(10, completedJob.getCompletedMaps());
  assertEquals(1, completedJob.getCompletedReduces());
  assertEquals(12, completedJob.getTasks().size());
  // after getTasks(), tasks must be loaded regardless of loadTasks.
  // assertTrue replaces the original assertEquals(true, ...) — same
  // check, idiomatic form with a clearer failure message.
  assertTrue(completedJob.tasksLoaded.get());
  assertEquals(10, completedJob.getTasks(TaskType.MAP).size());
  assertEquals(2, completedJob.getTasks(TaskType.REDUCE).size());
  assertEquals("user", completedJob.getUserName());
  assertEquals(JobState.SUCCEEDED, completedJob.getState());
  JobReport jobReport = completedJob.getReport();
  assertEquals("user", jobReport.getUser());
  assertEquals(JobState.SUCCEEDED, jobReport.getJobState());
}
InternalCallVerifier EqualityVerifier
/**
 * Verify that all the events are flushed on stopping the HistoryHandler:
 * after the app stops, a fresh JobHistory must be able to parse the job
 * with the expected task counts and final state.
 */
@Test public void testEventsFlushOnStop() throws Exception {
  Configuration conf = new Configuration();
  MRApp app =
      new MRAppWithSpecialHistoryHandler(1, 0, true, this.getClass().getName(), true);
  app.submit(conf);
  Job job = app.getContext().getAllJobs().values().iterator().next();
  JobId jobId = job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job, JobState.SUCCEEDED);
  app.waitForState(Service.STATE.STOPPED);

  // Re-read the flushed history through a fresh JobHistory instance.
  HistoryContext context = new JobHistory();
  ((JobHistory) context).init(conf);
  Job parsedJob = context.getJob(jobId);
  Assert.assertEquals("CompletedMaps not correct", 1, parsedJob.getCompletedMaps());

  Map tasks = parsedJob.getTasks();
  Assert.assertEquals("No of tasks not correct", 1, tasks.size());
  verifyTask(tasks.values().iterator().next());
  Map maps = parsedJob.getTasks(TaskType.MAP);
  Assert.assertEquals("No of maps not correct", 1, maps.size());
  Assert.assertEquals("Job state not currect", JobState.SUCCEEDED, parsedJob.getState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Runs a 2-map/1-reduce job through MRAppWithHistory, then replays its
 * history through a fresh JobHistory service and verifies the parsed job's
 * user, state, and per-type task counts.
 */
@Test public void testHistoryEvents() throws Exception {
  Configuration conf=new Configuration();
  MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true);
  app.submit(conf);
  Job job=app.getContext().getAllJobs().values().iterator().next();
  JobId jobId=job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job,JobState.SUCCEEDED);
  app.waitForState(Service.STATE.STOPPED);
  HistoryContext context=new JobHistory();
  ((JobHistory)context).init(conf);
  ((JobHistory)context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  // Fix: assertEquals takes (expected, actual); the original had them
  // swapped, which yields misleading failure messages.
  Assert.assertEquals(Service.STATE.STARTED,((JobHistory)context).getServiceState());
  Job parsedJob=context.getJob(jobId);
  ((JobHistory)context).stop();
  Assert.assertEquals(Service.STATE.STOPPED,((JobHistory)context).getServiceState());
  Assert.assertEquals("CompletedMaps not correct",2,parsedJob.getCompletedMaps());
  Assert.assertEquals(System.getProperty("user.name"),parsedJob.getUserName());
  Map tasks=parsedJob.getTasks();
  Assert.assertEquals("No of tasks not correct",3,tasks.size());
  for ( Task task : tasks.values()) {
    verifyTask(task);
  }
  Map maps=parsedJob.getTasks(TaskType.MAP);
  Assert.assertEquals("No of maps not correct",2,maps.size());
  Map reduces=parsedJob.getTasks(TaskType.REDUCE);
  Assert.assertEquals("No of reduces not correct",1,reduces.size());
  Assert.assertEquals("CompletedReduce not correct",1,parsedJob.getCompletedReduces());
  // Fix: typo "currect" -> "correct" in the failure message.
  Assert.assertEquals("Job state not correct",JobState.SUCCEEDED,parsedJob.getState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that the queue name a job was submitted to survives the round
 * trip through the history file and is reported by the parsed job.
 */
@Test public void testAssignedQueue() throws Exception {
  Configuration conf=new Configuration();
  // Submit to a named queue so we can assert it back out of the history.
  MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true,"assignedQueue");
  app.submit(conf);
  Job job=app.getContext().getAllJobs().values().iterator().next();
  JobId jobId=job.getID();
  LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
  app.waitForState(job,JobState.SUCCEEDED);
  app.waitForState(Service.STATE.STOPPED);
  HistoryContext context=new JobHistory();
  ((JobHistory)context).init(conf);
  ((JobHistory)context).start();
  Assert.assertTrue(context.getStartTime() > 0);
  // Fix: assertEquals takes (expected, actual); the original had them
  // swapped, which yields misleading failure messages.
  Assert.assertEquals(Service.STATE.STARTED,((JobHistory)context).getServiceState());
  Job parsedJob=context.getJob(jobId);
  ((JobHistory)context).stop();
  Assert.assertEquals(Service.STATE.STOPPED,((JobHistory)context).getServiceState());
  Assert.assertEquals("QueueName not correct","assignedQueue",parsedJob.getQueueName());
}
InternalCallVerifier EqualityVerifier
@Test public void testJobHistoryEventHandlerIsFirstServiceToStop(){
  // Composite services stop in reverse registration order, so the history
  // handler must be registered last to be stopped first (flushing events
  // before anything else shuts down).
  MRApp app=new MRAppWithSpecialHistoryHandler(1,0,true,this.getClass().getName(),true);
  app.init(new Configuration());
  Service[] registered=app.getServices().toArray(new Service[0]);
  Service lastRegistered=registered[registered.length - 1];
  Assert.assertEquals("JobHistoryEventHandler",lastRegistered.getName());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=60000) public void testDiagnosticsForKilledJob() throws Exception {
  // Runs a job that gets killed, then re-reads its history file and checks
  // that the recorded error info contains every in-memory diagnostic line
  // plus the standard "job killed" message.
  LOG.info("STARTING testDiagnosticsForKilledJob");
  try {
    final Configuration conf=new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app=new MRAppWithHistoryWithJobKilled(2,1,true,this.getClass().getName(),true);
    app.submit(conf);
    Job job=app.getContext().getAllJobs().values().iterator().next();
    JobId jobId=job.getID();
    app.waitForState(job,JobState.KILLED);
    app.waitForState(Service.STATE.STOPPED);
    JobHistory jobHistory=new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
    JobHistoryParser parser;
    JobInfo jobInfo;
    // Hold the fileInfo monitor while opening and parsing so the file is
    // not moved/deleted underneath us. NOTE(review): assumes other threads
    // synchronize on the same object -- confirm in HistoryFileInfo.
    synchronized (fileInfo) {
      Path historyFilePath=fileInfo.getHistoryFile();
      FSDataInputStream in=null;
      FileContext fc=null;
      try {
        fc=FileContext.getFileContext(conf);
        in=fc.open(fc.makeQualified(historyFilePath));
      }
      catch ( IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath,ioe);
        throw (new Exception("Can not open History File"));
      }
      parser=new JobHistoryParser(in);
      jobInfo=parser.parse();
    }
    Exception parseException=parser.getParseException();
    // NOTE(review): the message says "expected" but this asserts that no
    // parse exception occurred -- it likely means "unexpected".
    assertNull("Caught an expected exception " + parseException,parseException);
    final List originalDiagnostics=job.getDiagnostics();
    final String historyError=jobInfo.getErrorInfo();
    // Messages mention a "failed" job although this one was killed; the
    // checks are identical for both terminal states.
    assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty());
    assertNotNull("No history error info for a failed job ",historyError);
    // Every in-memory diagnostic must survive the history round trip.
    for ( String diagString : originalDiagnostics) {
      assertTrue(historyError.contains(diagString));
    }
    assertTrue("No killed message in diagnostics",historyError.contains(JobImpl.JOB_KILLED_DIAG));
  }
  finally {
    LOG.info("FINISHED testDiagnosticsForKilledJob");
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testMultipleFailedTasks() throws Exception {
  // Feeds the parser a scripted event stream (via a mocked EventReader)
  // containing two task starts, two task failures, and one job-unsuccessful
  // event, then checks the failed task id shows up in the job's error info.
  JobHistoryParser parser=new JobHistoryParser(Mockito.mock(FSDataInputStream.class));
  EventReader reader=Mockito.mock(EventReader.class);
  final AtomicInteger numEventsRead=new AtomicInteger(0);
  final org.apache.hadoop.mapreduce.TaskType taskType=org.apache.hadoop.mapreduce.TaskType.MAP;
  final TaskID[] tids=new TaskID[2];
  final JobID jid=new JobID("1",1);
  tids[0]=new TaskID(jid,taskType,0);
  tids[1]=new TaskID(jid,taskType,1);
  Mockito.when(reader.getNextEvent()).thenAnswer(new Answer(){
    public HistoryEvent answer( InvocationOnMock invocation) throws IOException {
      // Each call emits the next event in the script; alternates between
      // the two task ids via the low bit of the call counter.
      int eventId=numEventsRead.getAndIncrement();
      TaskID tid=tids[eventId & 0x1];
      if (eventId < 2) {
        // Events 0-1: both tasks start.
        return new TaskStartedEvent(tid,0,taskType,"");
      }
      if (eventId < 4) {
        // Events 2-3: both tasks fail.
        TaskFailedEvent tfe=new TaskFailedEvent(tid,0,taskType,"failed","FAILED",null,new Counters());
        // Round-trips the datum; NOTE(review): presumably exercises the
        // set/getDatum serialization path -- confirm intent.
        tfe.setDatum(tfe.getDatum());
        return tfe;
      }
      if (eventId < 5) {
        // Event 4: the job fails, implicating the first task.
        JobUnsuccessfulCompletionEvent juce=new JobUnsuccessfulCompletionEvent(jid,100L,2,0,"JOB_FAILED",Collections.singletonList("Task failed: " + tids[0].toString()));
        return juce;
      }
      // End of stream.
      return null;
    }
  }
  );
  JobInfo info=parser.parse(reader);
  assertTrue("Task 0 not implicated",info.getErrorInfo().contains(tids[0].toString()));
}
InternalCallVerifier EqualityVerifier
/**
 * Parses a checked-in .jhist file for a FAILED job that carries no
 * diagnostics and verifies the parser reports the right job id and an
 * empty (not null) error string.
 */
@Test public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
  final Path histPath=new Path(getClass().getClassLoader().getResource("job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist").getFile());
  final FileSystem lfs=FileSystem.getLocal(new Configuration());
  // try-with-resources replaces the manual try/finally close.
  try (FSDataInputStream fsdis=lfs.open(histPath)) {
    JobHistoryParser parser=new JobHistoryParser(fsdis);
    JobInfo info=parser.parse();
    // Fix: assertEquals takes (message, expected, actual); the original
    // had expected and actual swapped.
    assertEquals("History parsed jobId incorrectly",JobID.forName("job_1393307629410_0001"),info.getJobId());
    assertEquals("Default diagnostics incorrect ","",info.getErrorInfo());
  }
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Simple test of PartialJob: a job built only from index info reports full
 * progress, grants access to everyone, and returns null for everything that
 * would require loading the full history file.
 */
@Test(timeout=1000) public void testPartialJob() throws Exception {
  JobId jobId=new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii=new JobIndexInfo(0L,System.currentTimeMillis(),"user","jobName",jobId,3,2,"JobStatus");
  PartialJob test=new PartialJob(jii,jobId);
  // A partial job is always considered complete.
  Assert.assertEquals(1.0f,test.getProgress(),0.001f);
  // Anything needing the full history file is null on a PartialJob.
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));
  assertNull(test.getTaskAttemptCompletionEvents(0,100));
  assertNull(test.getMapAttemptCompletionEvents(0,100));
  // Access checks always pass for a partial job.
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(),null));
  assertNull(test.getAMInfos());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=60000) public void testCountersForFailedTask() throws Exception {
  // Runs a job with a failed task, reparses its history file, and checks
  // that every completed task report still carries non-null counters and
  // that the job's error info includes all original diagnostics.
  LOG.info("STARTING testCountersForFailedTask");
  try {
    Configuration conf=new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app=new MRAppWithHistoryWithFailedTask(2,1,true,this.getClass().getName(),true);
    app.submit(conf);
    Job job=app.getContext().getAllJobs().values().iterator().next();
    JobId jobId=job.getID();
    app.waitForState(job,JobState.FAILED);
    app.waitForState(Service.STATE.STOPPED);
    JobHistory jobHistory=new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
    JobHistoryParser parser;
    JobInfo jobInfo;
    // Hold the fileInfo monitor while opening/parsing so the file is not
    // moved or deleted underneath us.
    synchronized (fileInfo) {
      Path historyFilePath=fileInfo.getHistoryFile();
      FSDataInputStream in=null;
      FileContext fc=null;
      try {
        fc=FileContext.getFileContext(conf);
        in=fc.open(fc.makeQualified(historyFilePath));
      }
      catch ( IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath,ioe);
        throw (new Exception("Can not open History File"));
      }
      parser=new JobHistoryParser(in);
      jobInfo=parser.parse();
    }
    Exception parseException=parser.getParseException();
    // NOTE(review): message says "expected" but this asserts no exception
    // occurred -- likely meant "unexpected".
    Assert.assertNull("Caught an expected exception " + parseException,parseException);
    // Every parsed task must yield a report with counters, even for the
    // failed task.
    for ( Map.Entry entry : jobInfo.getAllTasks().entrySet()) {
      TaskId yarnTaskID=TypeConverter.toYarn(entry.getKey());
      CompletedTask ct=new CompletedTask(yarnTaskID,entry.getValue());
      Assert.assertNotNull("completed task report has null counters",ct.getReport().getCounters());
    }
    final List originalDiagnostics=job.getDiagnostics();
    final String historyError=jobInfo.getErrorInfo();
    assertTrue("No original diagnostics for a failed job",originalDiagnostics != null && !originalDiagnostics.isEmpty());
    assertNotNull("No history error info for a failed job ",historyError);
    // All in-memory diagnostics must survive the history round trip.
    for ( String diagString : originalDiagnostics) {
      assertTrue(historyError.contains(diagString));
    }
  }
  finally {
    LOG.info("FINISHED testCountersForFailedTask");
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=50000) public void testScanningOldDirs() throws Exception {
  // After a job's history is evicted from the job-list cache and moved to
  // the "done" directory, the HistoryFileManager must still locate it by
  // scanning the old directories; also checks clean executor shutdown.
  LOG.info("STARTING testScanningOldDirs");
  try {
    Configuration conf=new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
    app.submit(conf);
    Job job=app.getContext().getAllJobs().values().iterator().next();
    JobId jobId=job.getID();
    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
    app.waitForState(job,JobState.SUCCEEDED);
    app.waitForState(Service.STATE.STOPPED);
    HistoryFileManagerForTest hfm=new HistoryFileManagerForTest();
    hfm.init(conf);
    HistoryFileInfo fileInfo=hfm.getFileInfo(jobId);
    Assert.assertNotNull("Unable to locate job history",fileInfo);
    // Evict from the cache so the next lookup must scan the done dirs.
    hfm.deleteJobFromJobListCache(fileInfo);
    // Poll for up to 10 seconds for the asynchronous move to complete.
    final int msecPerSleep=10;
    int msecToSleep=10 * 1000;
    while (fileInfo.isMovePending() && msecToSleep > 0) {
      Assert.assertTrue(!fileInfo.didMoveFail());
      msecToSleep-=msecPerSleep;
      Thread.sleep(msecPerSleep);
    }
    Assert.assertTrue("Timeout waiting for history move",msecToSleep > 0);
    // This lookup now has to find the file in the old/done directories.
    fileInfo=hfm.getFileInfo(jobId);
    hfm.stop();
    Assert.assertNotNull("Unable to locate old job history",fileInfo);
    Assert.assertTrue("HistoryFileManager not shutdown properly",hfm.moveToDoneExecutor.isTerminated());
  }
  finally {
    LOG.info("FINISHED testScanningOldDirs");
  }
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test cleaning of old history files. With the default max age the file
 * must survive a clean() pass; with a negative max age (everything is
 * "too old") the next clean() must delete it.
 */
@Test(timeout=15000) public void testDeleteFileInfo() throws Exception {
  LOG.info("STARTING testDeleteFileInfo");
  try {
    Configuration conf=new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
    app.submit(conf);
    Job job=app.getContext().getAllJobs().values().iterator().next();
    JobId jobId=job.getID();
    app.waitForState(job,JobState.SUCCEEDED);
    app.waitForState(Service.STATE.STOPPED);
    HistoryFileManager hfm=new HistoryFileManager();
    hfm.init(conf);
    HistoryFileInfo fileInfo=hfm.getFileInfo(jobId);
    hfm.initExisting();
    // Wait for the asynchronous move-to-done to finish before cleaning.
    while (fileInfo.isMovePending()) {
      Thread.sleep(300);
    }
    Assert.assertNotNull(hfm.jobListCache.values());
    // Default retention (1 week): a fresh file must not be deleted.
    hfm.clean();
    Assert.assertFalse(fileInfo.isDeleted());
    // Negative max age makes every file eligible for deletion.
    hfm.setMaxHistoryAge(-1);
    hfm.clean();
    hfm.stop();
    Assert.assertTrue("Thread pool shutdown",hfm.moveToDoneExecutor.isTerminated());
    Assert.assertTrue("file should be deleted ",fileInfo.isDeleted());
  }
  finally {
    LOG.info("FINISHED testDeleteFileInfo");
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Simple smoke test of assorted JobHistory accessor methods: job lookup by
 * app id, partial-job queries, and the fixed identity/placeholder values
 * JobHistory reports (application id/name, null event handler, clock, and
 * cluster info).
 */
@Test(timeout=20000) public void testJobHistoryMethods() throws Exception {
  LOG.info("STARTING testJobHistoryMethods");
  try {
    Configuration configuration=new Configuration();
    configuration.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
    RackResolver.init(configuration);
    MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
    app.submit(configuration);
    Job job=app.getContext().getAllJobs().values().iterator().next();
    JobId jobId=job.getID();
    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
    app.waitForState(job,JobState.SUCCEEDED);
    JobHistory jobHistory=new JobHistory();
    jobHistory.init(configuration);
    Assert.assertEquals(1,jobHistory.getAllJobs().size());
    Assert.assertEquals(1,jobHistory.getAllJobs(app.getAppID()).size());
    // Query partial jobs with a wide-open time window on the "default"
    // queue; the single succeeded job must match.
    JobsInfo jobsinfo=jobHistory.getPartialJobs(0L,10L,null,"default",0L,System.currentTimeMillis() + 1,0L,System.currentTimeMillis() + 1,JobState.SUCCEEDED);
    Assert.assertEquals(1,jobsinfo.getJobs().size());
    Assert.assertNotNull(jobHistory.getApplicationAttemptId());
    // JobHistory reports a fixed placeholder application identity.
    Assert.assertEquals("application_0_0000",jobHistory.getApplicationID().toString());
    Assert.assertEquals("Job History Server",jobHistory.getApplicationName());
    // These context accessors are intentionally unimplemented on JobHistory.
    Assert.assertNull(jobHistory.getEventHandler());
    Assert.assertNull(jobHistory.getClock());
    Assert.assertNull(jobHistory.getClusterInfo());
  }
  finally {
    LOG.info("FINISHED testJobHistoryMethods");
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Runs a job whose first attempts fail, reparses the history file, and
 * verifies each attempt's rack name plus the total count of FAILED
 * attempts recorded in history.
 */
@Test(timeout=30000) public void testHistoryParsingForFailedAttempts() throws Exception {
  LOG.info("STARTING testHistoryParsingForFailedAttempts");
  try {
    Configuration conf=new Configuration();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app=new MRAppWithHistoryWithFailedAttempt(2,1,true,this.getClass().getName(),true);
    app.submit(conf);
    Job job=app.getContext().getAllJobs().values().iterator().next();
    JobId jobId=job.getID();
    app.waitForState(job,JobState.SUCCEEDED);
    app.waitForState(Service.STATE.STOPPED);
    JobHistory jobHistory=new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
    JobHistoryParser parser;
    JobInfo jobInfo;
    // Hold the fileInfo monitor while opening/parsing so the file is not
    // moved or deleted underneath us.
    synchronized (fileInfo) {
      Path historyFilePath=fileInfo.getHistoryFile();
      FSDataInputStream in=null;
      FileContext fc=null;
      try {
        fc=FileContext.getFileContext(conf);
        in=fc.open(fc.makeQualified(historyFilePath));
      }
      catch ( IOException ioe) {
        LOG.info("Can not open history file: " + historyFilePath,ioe);
        throw (new Exception("Can not open History File"));
      }
      parser=new JobHistoryParser(in);
      jobInfo=parser.parse();
    }
    Exception parseException=parser.getParseException();
    // Fix: the message said "expected" while asserting no exception occurred.
    Assert.assertNull("Caught an unexpected exception " + parseException,parseException);
    int noOffailedAttempts=0;
    Map allTasks=jobInfo.getAllTasks();
    // Cross-check every live attempt against its parsed counterpart.
    for ( Task task : job.getTasks().values()) {
      TaskInfo taskInfo=allTasks.get(TypeConverter.fromYarn(task.getID()));
      for ( TaskAttempt taskAttempt : task.getAttempts().values()) {
        TaskAttemptInfo taskAttemptInfo=taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
        // Fix: assertEquals takes (message, expected, actual); RACK_NAME is
        // the expected value and was in the actual position.
        Assert.assertEquals("rack-name is incorrect",RACK_NAME,taskAttemptInfo.getRackname());
        if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
          noOffailedAttempts++;
        }
      }
    }
    Assert.assertEquals("No of Failed tasks doesn't match.",2,noOffailedAttempts);
  }
  finally {
    LOG.info("FINISHED testHistoryParsingForFailedAttempts");
  }
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Starts a JobHistoryServer over a finished job and exercises the client
 * protocol report calls: task attempt report, task report, completion
 * events, and diagnostics.
 */
@Test(timeout=50000) public void testReports() throws Exception {
  Configuration config=new Configuration();
  config.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
  RackResolver.init(config);
  MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
  app.submit(config);
  Job job=app.getContext().getAllJobs().values().iterator().next();
  app.waitForState(job,JobState.SUCCEEDED);
  historyServer=new JobHistoryServer();
  historyServer.init(config);
  historyServer.start();
  // Locate the JobHistory service among the server's child services.
  JobHistory jobHistory=null;
  for ( Service service : historyServer.getServices()) {
    if (service instanceof JobHistory) {
      jobHistory=(JobHistory)service;
    }
  }
  // Fix: removed a stray empty statement (";") and guard against a missing
  // JobHistory service before dereferencing it.
  assertNotNull("JobHistory service not found",jobHistory);
  Map jobs=jobHistory.getAllJobs();
  assertEquals(1,jobs.size());
  assertEquals("job_0_0000",jobs.keySet().iterator().next().toString());
  Task task=job.getTasks().values().iterator().next();
  TaskAttempt attempt=task.getAttempts().values().iterator().next();
  HistoryClientService historyService=historyServer.getClientService();
  MRClientProtocol protocol=historyService.getClientHandler();
  // --- getTaskAttemptReport ---
  GetTaskAttemptReportRequest gtarRequest=recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
  TaskAttemptId taId=attempt.getID();
  taId.setTaskId(task.getID());
  taId.getTaskId().setJobId(job.getID());
  gtarRequest.setTaskAttemptId(taId);
  GetTaskAttemptReportResponse response=protocol.getTaskAttemptReport(gtarRequest);
  assertEquals("container_0_0000_01_000000",response.getTaskAttemptReport().getContainerId().toString());
  assertTrue(response.getTaskAttemptReport().getDiagnosticInfo().isEmpty());
  assertNotNull(response.getTaskAttemptReport().getCounters().getCounter(TaskCounter.PHYSICAL_MEMORY_BYTES));
  assertEquals(taId.toString(),response.getTaskAttemptReport().getTaskAttemptId().toString());
  // --- getTaskReport ---
  GetTaskReportRequest request=recordFactory.newRecordInstance(GetTaskReportRequest.class);
  TaskId taskId=task.getID();
  taskId.setJobId(job.getID());
  request.setTaskId(taskId);
  GetTaskReportResponse reportResponse=protocol.getTaskReport(request);
  assertEquals("",reportResponse.getTaskReport().getDiagnosticsList().iterator().next());
  assertEquals(1.0f,reportResponse.getTaskReport().getProgress(),0.01);
  assertEquals(taskId.toString(),reportResponse.getTaskReport().getTaskId().toString());
  assertEquals(TaskState.SUCCEEDED,reportResponse.getTaskReport().getTaskState());
  // --- getTaskAttemptCompletionEvents ---
  GetTaskAttemptCompletionEventsRequest taskAttemptRequest=recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
  taskAttemptRequest.setJobId(job.getID());
  GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse=protocol.getTaskAttemptCompletionEvents(taskAttemptRequest);
  assertEquals(0,taskAttemptCompletionEventsResponse.getCompletionEventCount());
  // --- getDiagnostics ---
  GetDiagnosticsRequest diagnosticRequest=recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
  diagnosticRequest.setTaskAttemptId(taId);
  GetDiagnosticsResponse diagnosticResponse=protocol.getDiagnostics(diagnosticRequest);
  assertEquals(1,diagnosticResponse.getDiagnosticsCount());
  assertEquals("",diagnosticResponse.getDiagnostics(0));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=50000) public void testStartStopServer() throws Exception {
  // Walks the JobHistoryServer through its INITED -> STARTED -> STOPPED
  // lifecycle and checks the client service tracks the same states.
  historyServer=new JobHistoryServer();
  Configuration config=new Configuration();
  historyServer.init(config);
  assertEquals(STATE.INITED,historyServer.getServiceState());
  // NOTE(review): pins the exact number of child services (6); this will
  // need updating whenever a service is added or removed from the server.
  assertEquals(6,historyServer.getServices().size());
  HistoryClientService historyService=historyServer.getClientService();
  assertNotNull(historyServer.getClientService());
  assertEquals(STATE.INITED,historyService.getServiceState());
  historyServer.start();
  assertEquals(STATE.STARTED,historyServer.getServiceState());
  assertEquals(STATE.STARTED,historyService.getServiceState());
  historyServer.stop();
  assertEquals(STATE.STOPPED,historyServer.getServiceState());
  // The bind address survives even after the server has stopped.
  assertNotNull(historyService.getClientHandler().getConnectAddress());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Trivial test case that verifies basic functionality of {@link JobIdHistoryFileInfoMap}
 * with a single entry: putIfAbsent semantics, get, size, key set, and values.
 */
@Test(timeout=2000) public void testWithSingleElement() throws InterruptedException {
  JobIdHistoryFileInfoMap mapWithSize=new JobIdHistoryFileInfoMap();
  JobId jobId=MRBuilderUtils.newJobId(1,1,1);
  HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId);
  // First insert returns null; a second insert of the same key returns the
  // existing value (standard putIfAbsent contract).
  assertEquals("Incorrect return on putIfAbsent()",null,mapWithSize.putIfAbsent(jobId,fileInfo1));
  assertEquals("Incorrect return on putIfAbsent()",fileInfo1,mapWithSize.putIfAbsent(jobId,fileInfo1));
  assertEquals("Incorrect get()",fileInfo1,mapWithSize.get(jobId));
  assertTrue("Incorrect size()",checkSize(mapWithSize,1));
  NavigableSet set=mapWithSize.navigableKeySet();
  assertEquals("Incorrect navigableKeySet()",1,set.size());
  assertTrue("Incorrect navigableKeySet()",set.contains(jobId));
  Collection values=mapWithSize.values();
  assertEquals("Incorrect values()",1,values.size());
  assertTrue("Incorrect values()",values.contains(fileInfo1));
}
IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=1000) public void testEviction() throws InterruptedException {
  // Fills a size-2 JobListCache with three entries and polls until the
  // oldest entry (fileInfo1) has been evicted asynchronously.
  int maxSize=2;
  JobListCache cache=new JobListCache(maxSize,1000);
  JobId jobId1=MRBuilderUtils.newJobId(1,1,1);
  HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo1.getJobId()).thenReturn(jobId1);
  JobId jobId2=MRBuilderUtils.newJobId(2,2,2);
  HistoryFileInfo fileInfo2=Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo2.getJobId()).thenReturn(jobId2);
  JobId jobId3=MRBuilderUtils.newJobId(3,3,3);
  HistoryFileInfo fileInfo3=Mockito.mock(HistoryFileInfo.class);
  Mockito.when(fileInfo3.getJobId()).thenReturn(jobId3);
  cache.addIfAbsent(fileInfo1);
  cache.addIfAbsent(fileInfo2);
  // Third insert exceeds maxSize and should trigger eviction of the
  // least-recently-added entry.
  cache.addIfAbsent(fileInfo3);
  Collection values;
  // Poll up to ~900ms (9 x 100ms) for the eviction to take effect; must
  // finish inside the 1s test timeout.
  for (int i=0; i < 9; i++) {
    values=cache.values();
    if (values.size() > maxSize) {
      Thread.sleep(100);
    }
    else {
      assertFalse("fileInfo1 should have been evicted",values.contains(fileInfo1));
      return;
    }
  }
  fail("JobListCache didn't delete the extra entry");
}
InternalCallVerifier EqualityVerifier
@Test(timeout=1000) public void testAddExisting(){
  // Adding the same HistoryFileInfo twice must not create a duplicate
  // cache entry.
  JobListCache jobCache=new JobListCache(2,1000);
  JobId id=MRBuilderUtils.newJobId(1,1,1);
  HistoryFileInfo info=Mockito.mock(HistoryFileInfo.class);
  Mockito.when(info.getJobId()).thenReturn(id);
  jobCache.addIfAbsent(info);
  jobCache.addIfAbsent(info);
  assertEquals("Incorrect number of cache entries",1,jobCache.values().size());
}
InternalCallVerifier EqualityVerifier
@Test public void testGetGroups() throws Exception {
  // Resolve the current user's groups through the hsadmin CLI; exit code 0
  // means the command completed successfully.
  String currentUser=UserGroupInformation.getCurrentUser().getUserName();
  String[] args={"-getGroups",currentUser};
  int exitCode=hsAdminClient.run(args);
  assertEquals("Exit code should be 0 but was: " + exitCode,0,exitCode);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRefreshUserToGroupsMappings() throws Exception {
  // Group lookups are cached: two lookups before the refresh must agree,
  // and after running "-refreshUserToGroupsMappings" through the hsadmin
  // client the re-resolved groups must differ from the cached ones.
  // NOTE(review): assumes the test harness swaps in a group-mapping
  // implementation that returns different values per resolution -- confirm
  // against the class's setup.
  String[] args=new String[]{"-refreshUserToGroupsMappings"};
  Groups groups=Groups.getUserToGroupsMappingService(conf);
  String user=UserGroupInformation.getCurrentUser().getUserName();
  System.out.println("first attempt:");
  List g1=groups.getGroups(user);
  String[] str_groups=new String[g1.size()];
  g1.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  System.out.println("second attempt, should be same:");
  List g2=groups.getGroups(user);
  g2.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  // Cached result: second lookup matches the first element-for-element.
  for (int i=0; i < g2.size(); i++) {
    assertEquals("Should be same group ",g1.get(i),g2.get(i));
  }
  hsAdminClient.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  List g3=groups.getGroups(user);
  g3.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  // After the refresh the cache was invalidated, so every entry differs.
  for (int i=0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test HsController: checks the page classes returned for each view and
 * that each controller action renders the expected page class.
 */
@Test public void testHsController() throws Exception {
  AppContext ctx=mock(AppContext.class);
  ApplicationId appId=ApplicationIdPBImpl.newInstance(0,5);
  when(ctx.getApplicationID()).thenReturn(appId);
  AppForTest app=new AppForTest(ctx);
  Configuration config=new Configuration();
  RequestContext requestCtx=mock(RequestContext.class);
  HsControllerForTest controller=new HsControllerForTest(app,config,requestCtx);
  controller.index();
  assertEquals("JobHistory",controller.get(Params.TITLE,""));
  // Static page-class accessors.
  assertEquals(HsJobPage.class,controller.jobPage());
  assertEquals(HsCountersPage.class,controller.countersPage());
  assertEquals(HsTasksPage.class,controller.tasksPage());
  assertEquals(HsTaskPage.class,controller.taskPage());
  assertEquals(HsAttemptsPage.class,controller.attemptsPage());
  // Seed request parameters so the actions below can resolve a job/task.
  controller.set(AMParams.JOB_ID,"job_01_01");
  controller.set(AMParams.TASK_ID,"task_01_01_m01_01");
  controller.set(AMParams.TASK_TYPE,"m");
  controller.set(AMParams.ATTEMPT_STATE,"State");
  Job job=mock(Job.class);
  Task task=mock(Task.class);
  when(job.getTask(any(TaskId.class))).thenReturn(task);
  JobId jobID=MRApps.toJobID("job_01_01");
  when(ctx.getJob(jobID)).thenReturn(job);
  // Grant access so the actions do not short-circuit on ACL checks.
  when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
  // Each action should render its corresponding page class.
  controller.job();
  assertEquals(HsJobPage.class,controller.getClazz());
  controller.jobCounters();
  assertEquals(HsCountersPage.class,controller.getClazz());
  controller.taskCounters();
  assertEquals(HsCountersPage.class,controller.getClazz());
  controller.tasks();
  assertEquals(HsTasksPage.class,controller.getClazz());
  controller.task();
  assertEquals(HsTaskPage.class,controller.getClazz());
  controller.attempts();
  assertEquals(HsAttemptsPage.class,controller.getClazz());
  assertEquals(HsConfPage.class,controller.confPage());
  assertEquals(HsAboutPage.class,controller.aboutPage());
  controller.about();
  assertEquals(HsAboutPage.class,controller.getClazz());
  controller.logs();
  assertEquals(HsLogsPage.class,controller.getClazz());
  controller.nmlogs();
  assertEquals(AggregatedLogsPage.class,controller.getClazz());
  assertEquals(HsSingleCounterPage.class,controller.singleCounterPage());
  controller.singleJobCounter();
  assertEquals(HsSingleCounterPage.class,controller.getClazz());
  controller.singleTaskCounter();
  assertEquals(HsSingleCounterPage.class,controller.getClazz());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAppControllerIndex(){
  // The index action should publish the mock context's application id.
  MockAppContext appCtx=new MockAppContext(0,1,1,1);
  Injector injector=WebAppTests.createMockInjector(AppContext.class,appCtx);
  HsController hsController=injector.getInstance(HsController.class);
  hsController.index();
  String expectedAppId=appCtx.getApplicationID().toString();
  assertEquals(expectedAppId,hsController.get(APP_ID,""));
}
InternalCallVerifier EqualityVerifier
@Test public void testInfoDefault() throws JSONException, Exception {
  // GET ws/v1/history/info/ without an Accept header should default to JSON.
  WebResource history=resource().path("ws").path("v1").path("history");
  ClientResponse response=history.path("info/").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject body=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  verifyHSInfo(body.getJSONObject("historyInfo"),appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testHSDefault() throws JSONException, Exception {
  // GET ws/v1/history/ (trailing slash, no Accept header) defaults to JSON.
  WebResource base=resource().path("ws").path("v1");
  ClientResponse response=base.path("history/").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject body=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  verifyHSInfo(body.getJSONObject("historyInfo"),appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testInfoXML() throws JSONException, Exception {
  // GET ws/v1/history/info/ with Accept: application/xml returns XML.
  WebResource history=resource().path("ws").path("v1").path("history");
  ClientResponse response=history.path("info/").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
  String body=response.getEntity(String.class);
  verifyHSInfoXML(body,appContext);
}
InternalCallVerifier EqualityVerifier
@Test public void testInfo() throws JSONException, Exception {
  // GET ws/v1/history/info with an explicit JSON Accept header.
  WebResource history=resource().path("ws").path("v1").path("history");
  ClientResponse response=history.path("info").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
  JSONObject body=response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,body.length());
  verifyHSInfo(body.getJSONObject("historyInfo"),appContext);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidAccept() throws JSONException, Exception {
  // Requesting text/plain (unsupported) must fail with an internal server
  // error and produce no response body before the exception.
  WebResource r=resource();
  String responseStr="";
  try {
    responseStr=r.path("ws").path("v1").path("history").accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse response=ue.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR,response.getClientResponseStatus());
    // responseStr was never assigned, so it must still be empty.
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidUri2() throws JSONException, Exception {
  // An unknown top-level path (ws/v1/invalid) must yield 404 NOT_FOUND and
  // no response body before the exception.
  WebResource r=resource();
  String responseStr="";
  try {
    responseStr=r.path("ws").path("v1").path("invalid").accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse response=ue.getResponse();
    assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
    // responseStr was never assigned, so it must still be empty.
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr);
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testHSXML() throws JSONException, Exception {
  // GET ws/v1/history with Accept: application/xml returns XML.
  WebResource base=resource().path("ws").path("v1");
  ClientResponse response=base.path("history").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
  String body=response.getEntity(String.class);
  verifyHSInfoXML(body,appContext);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidUri() throws JSONException, Exception {
  // An unknown sub-path under history (ws/v1/history/bogus) must yield 404
  // NOT_FOUND and no response body before the exception.
  WebResource r=resource();
  String responseStr="";
  try {
    responseStr=r.path("ws").path("v1").path("history").path("bogus").accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse response=ue.getResponse();
    assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
    // responseStr was never assigned, so it must still be empty.
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr);
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Hits /history/info with a trailing slash and verifies the single
 * "historyInfo" JSON element via verifyHSInfo.
 */
@Test
public void testInfoSlash() throws JSONException, Exception {
  WebResource web = resource();
  ClientResponse resp = web.path("ws").path("v1").path("history").path("info/")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  verifyHSInfo(body.getJSONObject("historyInfo"), appContext);
}
InternalCallVerifier EqualityVerifier
/**
 * Hits /history with a trailing slash and verifies the single
 * "historyInfo" JSON element via verifyHSInfo.
 */
@Test
public void testHSSlash() throws JSONException, Exception {
  WebResource web = resource();
  ClientResponse resp = web.path("ws").path("v1").path("history/")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  verifyHSInfo(body.getJSONObject("historyInfo"), appContext);
}
InternalCallVerifier EqualityVerifier
/**
 * Hits the /history root endpoint as JSON and verifies the single
 * "historyInfo" element via verifyHSInfo.
 */
@Test
public void testHS() throws JSONException, Exception {
  WebResource web = resource();
  ClientResponse resp = web.path("ws").path("v1").path("history")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  verifyHSInfo(body.getJSONObject("historyInfo"), appContext);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * For every task of every known job, requests its attempts list with a
 * trailing slash and validates the JSON via verifyHsTaskAttempts.
 */
@Test
public void testTaskAttemptsSlash() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      ClientResponse resp = web.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobStr)
          .path("tasks").path(taskStr).path("attempts/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyHsTaskAttempts(body, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests per-attempt counters as XML for every task attempt of every
 * job, parses the document, and validates the "jobTaskAttemptCounters"
 * nodes via verifyHsTaskCountersXML.
 */
@Test
public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptId attemptId = attempt.getID();
        String attemptStr = MRApps.toString(attemptId);
        ClientResponse resp = web.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobStr)
            .path("tasks").path(taskStr)
            .path("attempts").path(attemptStr).path("counters")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
        String payload = resp.getEntity(String.class);
        // Parse the XML response into a DOM tree for inspection.
        DocumentBuilder builder =
            DocumentBuilderFactory.newInstance().newDocumentBuilder();
        Document dom = builder.parse(new InputSource(new StringReader(payload)));
        NodeList counters = dom.getElementsByTagName("jobTaskAttemptCounters");
        verifyHsTaskCountersXML(counters, attempt);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each task attempt with no Accept header (default media type)
 * and verifies the JSON "taskAttempt" element via verifyHsTaskAttempt.
 */
@Test
public void testTaskAttemptIdDefault() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptId attemptId = attempt.getID();
        String attemptStr = MRApps.toString(attemptId);
        ClientResponse resp = web.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobStr)
            .path("tasks").path(taskStr)
            .path("attempts").path(attemptStr).get(ClientResponse.class);
        // Default response type is JSON.
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, body.length());
        JSONObject attemptJson = body.getJSONObject("taskAttempt");
        verifyHsTaskAttempt(attemptJson, attempt, task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each task's attempts list with no Accept header (default
 * media type) and validates the JSON via verifyHsTaskAttempts.
 */
@Test
public void testTaskAttemptsDefault() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      ClientResponse resp = web.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobStr)
          .path("tasks").path(taskStr).path("attempts")
          .get(ClientResponse.class);
      // Default response type is JSON.
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyHsTaskAttempts(body, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each task's attempts list as XML, checks there is exactly
 * one "taskAttempts" wrapper element, and validates the "taskAttempt"
 * nodes via verifyHsTaskAttemptsXML.
 */
@Test
public void testTaskAttemptsXML() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      ClientResponse resp = web.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobStr)
          .path("tasks").path(taskStr).path("attempts")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String payload = resp.getEntity(String.class);
      // Parse the XML response into a DOM tree for inspection.
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      Document dom = builder.parse(new InputSource(new StringReader(payload)));
      NodeList wrappers = dom.getElementsByTagName("taskAttempts");
      assertEquals("incorrect number of elements", 1, wrappers.getLength());
      NodeList attempts = dom.getElementsByTagName("taskAttempt");
      verifyHsTaskAttemptsXML(attempts, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests per-attempt counters as JSON for every task attempt of every
 * job and validates the "jobTaskAttemptCounters" element via
 * verifyHsJobTaskAttemptCounters.
 */
@Test
public void testTaskAttemptIdCounters() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptId attemptId = attempt.getID();
        String attemptStr = MRApps.toString(attemptId);
        ClientResponse resp = web.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobStr)
            .path("tasks").path(taskStr)
            .path("attempts").path(attemptStr).path("counters")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, body.length());
        JSONObject counters = body.getJSONObject("jobTaskAttemptCounters");
        verifyHsJobTaskAttemptCounters(counters, attempt);
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each task's attempts list as JSON and validates the response
 * via verifyHsTaskAttempts.
 */
@Test
public void testTaskAttempts() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      ClientResponse resp = web.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobStr)
          .path("tasks").path(taskStr).path("attempts")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyHsTaskAttempts(body, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each individual task attempt as JSON and verifies the
 * "taskAttempt" element via verifyHsTaskAttempt.
 */
@Test
public void testTaskAttemptId() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptId attemptId = attempt.getID();
        String attemptStr = MRApps.toString(attemptId);
        ClientResponse resp = web.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobStr)
            .path("tasks").path(taskStr)
            .path("attempts").path(attemptStr)
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, body.length());
        JSONObject attemptJson = body.getJSONObject("taskAttempt");
        verifyHsTaskAttempt(attemptJson, attempt, task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each individual task attempt as XML, parses the document,
 * and validates every "taskAttempt" element via verifyHsTaskAttemptXML.
 */
@Test
public void testTaskAttemptIdXML() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptId attemptId = attempt.getID();
        String attemptStr = MRApps.toString(attemptId);
        ClientResponse resp = web.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobStr)
            .path("tasks").path(taskStr)
            .path("attempts").path(attemptStr)
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
        String payload = resp.getEntity(String.class);
        // Parse the XML response into a DOM tree for inspection.
        DocumentBuilder builder =
            DocumentBuilderFactory.newInstance().newDocumentBuilder();
        Document dom = builder.parse(new InputSource(new StringReader(payload)));
        NodeList nodes = dom.getElementsByTagName("taskAttempt");
        for (int i = 0; i < nodes.getLength(); i++) {
          verifyHsTaskAttemptXML((Element) nodes.item(i), attempt, task.getType());
        }
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each individual task attempt with a trailing slash and
 * verifies the "taskAttempt" JSON element via verifyHsTaskAttempt.
 */
@Test
public void testTaskAttemptIdSlash() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    for (Task task : jobs.get(jid).getTasks().values()) {
      String taskStr = MRApps.toString(task.getID());
      for (TaskAttempt attempt : task.getAttempts().values()) {
        TaskAttemptId attemptId = attempt.getID();
        String attemptStr = MRApps.toString(attemptId);
        ClientResponse resp = web.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobStr)
            .path("tasks").path(taskStr)
            .path("attempts").path(attemptStr + "/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, body.length());
        JSONObject attemptJson = body.getJSONObject("taskAttempt");
        verifyHsTaskAttempt(attemptJson, attempt, task.getType());
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's configuration with a trailing slash and validates
 * the "conf" JSON element via verifyHsJobConf.
 */
@Test
public void testJobConfSlash() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("conf/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject confJson = body.getJSONObject("conf");
    verifyHsJobConf(confJson, jobs.get(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's configuration as JSON and validates the "conf"
 * element via verifyHsJobConf.
 */
@Test
public void testJobConf() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("conf")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject confJson = body.getJSONObject("conf");
    verifyHsJobConf(confJson, jobs.get(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's configuration with no Accept header (default
 * media type) and validates the "conf" element via verifyHsJobConf.
 */
@Test
public void testJobConfDefault() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("conf")
        .get(ClientResponse.class);
    // Default response type is JSON.
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject confJson = body.getJSONObject("conf");
    verifyHsJobConf(confJson, jobs.get(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's configuration as XML, parses the document, and
 * validates the "conf" nodes via verifyHsJobConfXML.
 */
@Test
public void testJobConfXML() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("conf")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
    String payload = resp.getEntity(String.class);
    // Parse the XML response into a DOM tree for inspection.
    DocumentBuilder builder =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document dom = builder.parse(new InputSource(new StringReader(payload)));
    NodeList confNodes = dom.getElementsByTagName("conf");
    verifyHsJobConfXML(confNodes, jobs.get(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's AM attempts with no Accept header (default media
 * type) and validates the "jobAttempts" element via verifyHsJobAttempts.
 */
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("jobattempts")
        .get(ClientResponse.class);
    // Default response type is JSON.
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject attemptsJson = body.getJSONObject("jobAttempts");
    verifyHsJobAttempts(attemptsJson, appContext.getJob(jid));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a malformed job id ("job_foo") with the default media type
 * and verifies the 404 RemoteException payload via verifyJobIdInvalid.
 */
@Test
public void testJobIdInvalidDefault() throws JSONException, Exception {
  WebResource web = resource();
  try {
    web.path("ws").path("v1").path("history").path("mapreduce")
        .path("jobs").path("job_foo").get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    JSONObject remoteEx = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteEx.length());
    String message = remoteEx.getString("message");
    String type = remoteEx.getString("exception");
    String classname = remoteEx.getString("javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a malformed job id ("job_foo") as XML and verifies the 404
 * RemoteException XML payload via verifyJobIdInvalid.
 */
@Test
public void testJobIdInvalidXML() throws JSONException, Exception {
  WebResource web = resource();
  try {
    web.path("ws").path("v1").path("history").path("mapreduce")
        .path("jobs").path("job_foo")
        .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
    String payload = resp.getEntity(String.class);
    System.out.println(payload);
    // Parse the error body and pick out the first RemoteException element.
    DocumentBuilder builder =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document dom = builder.parse(new InputSource(new StringReader(payload)));
    NodeList nodes = dom.getElementsByTagName("RemoteException");
    Element remoteEx = (Element) nodes.item(0);
    String message = WebServicesTestUtils.getXmlString(remoteEx, "message");
    String type = WebServicesTestUtils.getXmlString(remoteEx, "exception");
    String classname = WebServicesTestUtils.getXmlString(remoteEx, "javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a well-formed but unknown job id and verifies the 404
 * NotFoundException details in the RemoteException JSON payload.
 */
@Test
public void testJobIdNonExist() throws JSONException, Exception {
  WebResource web = resource();
  try {
    web.path("ws").path("v1").path("history").path("mapreduce")
        .path("jobs").path("job_0_1234").get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    JSONObject remoteEx = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteEx.length());
    String message = remoteEx.getString("message");
    String type = remoteEx.getString("exception");
    String classname = remoteEx.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: job, job_0_1234, is not found", message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that a killed job's counters endpoint returns a jobCounters
 * JSON object that contains only the job id and no counter groups.
 *
 * NOTE(review): this test rebinds the shared {@code appContext} and
 * {@code injector} fields to a MockHistoryContext built with the
 * "killed" flag set to true; the Guice wiring below mirrors the test
 * class's standard module setup. The order (resource() first, then the
 * context swap) appears intentional — confirm before reordering.
 */
@Test public void testJobCountersForKilledJob() throws Exception {
WebResource r=resource();
// Mock history context for a killed job (last arg 'true' presumably
// marks the job as killed — TODO confirm against MockHistoryContext).
appContext=new MockHistoryContext(0,1,1,1,true);
injector=Guice.createInjector(new ServletModule(){
@Override protected void configureServlets(){
// Same bindings as the class-level module, but backed by the
// killed-job history context created above.
webApp=mock(HsWebApp.class);
when(webApp.name()).thenReturn("hsmockwebapp");
bind(JAXBContextResolver.class);
bind(HsWebServices.class);
bind(GenericExceptionHandler.class);
bind(WebApp.class).toInstance(webApp);
bind(AppContext.class).toInstance(appContext);
bind(HistoryContext.class).toInstance(appContext);
bind(Configuration.class).toInstance(conf);
serve("/*").with(GuiceContainer.class);
}
}
);
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
// Fetch the counters for the killed job as JSON.
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject info=json.getJSONObject("jobCounters");
WebServicesTestUtils.checkStringMatch("id",MRApps.toString(id),info.getString("id"));
// A killed job should expose only its id — length 1 means no
// counterGroup entries are present.
assertTrue("Job shouldn't contain any counters",info.length() == 1);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's AM attempts as JSON and validates the
 * "jobAttempts" element via verifyHsJobAttempts.
 */
@Test
public void testJobAttempts() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("jobattempts")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject attemptsJson = body.getJSONObject("jobAttempts");
    verifyHsJobAttempts(attemptsJson, appContext.getJob(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's counters (trailing slash, no Accept header) and
 * validates the "jobCounters" element via verifyHsJobCounters.
 */
@Test
public void testJobCountersDefault() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("counters/")
        .get(ClientResponse.class);
    // Default response type is JSON.
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject countersJson = body.getJSONObject("jobCounters");
    verifyHsJobCounters(countersJson, appContext.getJob(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job by id with a trailing slash and validates the
 * "job" JSON element via VerifyJobsUtils.verifyHsJob.
 */
@Test
public void testJobIdSlash() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr + "/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject jobJson = body.getJSONObject("job");
    VerifyJobsUtils.verifyHsJob(jobJson, appContext.getJob(jid));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a completely malformed job id ("bogusfoo") and verifies the
 * 404 NotFoundException details in the RemoteException JSON payload.
 */
@Test
public void testJobIdInvalidBogus() throws JSONException, Exception {
  WebResource web = resource();
  try {
    web.path("ws").path("v1").path("history").path("mapreduce")
        .path("jobs").path("bogusfoo").get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    JSONObject remoteEx = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteEx.length());
    String message = remoteEx.getString("message");
    String type = remoteEx.getString("exception");
    String classname = remoteEx.getString("javaClassName");
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: JobId string : " + "bogusfoo is not properly formed",
        message);
    WebServicesTestUtils.checkStringMatch("exception type",
        "NotFoundException", type);
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's AM attempts with a trailing slash and validates
 * the "jobAttempts" element via verifyHsJobAttempts.
 */
@Test
public void testJobAttemptsSlash() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("jobattempts/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject attemptsJson = body.getJSONObject("jobAttempts");
    verifyHsJobAttempts(attemptsJson, appContext.getJob(jid));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Requests the jobs list with no Accept header (default media type),
 * expects exactly one job, and validates its partial view via
 * VerifyJobsUtils.verifyHsJobPartial.
 */
@Test
public void testJobsDefault() throws JSONException, Exception {
  WebResource web = resource();
  ClientResponse resp = web.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").get(ClientResponse.class);
  // Default response type is JSON.
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArray = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 1, jobArray.length());
  JSONObject jobJson = jobArray.getJSONObject(0);
  Job job = appContext.getPartialJob(MRApps.toJobID(jobJson.getString("id")));
  VerifyJobsUtils.verifyHsJobPartial(jobJson, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job by id as JSON and validates the "job" element via
 * VerifyJobsUtils.verifyHsJob.
 */
@Test
public void testJobId() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr)
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject jobJson = body.getJSONObject("job");
    VerifyJobsUtils.verifyHsJob(jobJson, appContext.getJob(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job by id as XML, parses the document, and validates
 * the "job" nodes via verifyHsJobXML.
 */
@Test
public void testJobIdXML() throws Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr)
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
    String payload = resp.getEntity(String.class);
    // Parse the XML response into a DOM tree for inspection.
    DocumentBuilder builder =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document dom = builder.parse(new InputSource(new StringReader(payload)));
    NodeList jobNodes = dom.getElementsByTagName("job");
    verifyHsJobXML(jobNodes, appContext);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's counters as JSON and validates the "jobCounters"
 * element via verifyHsJobCounters.
 */
@Test
public void testJobCounters() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("counters")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject countersJson = body.getJSONObject("jobCounters");
    verifyHsJobCounters(countersJson, appContext.getJob(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's counters with a trailing slash and validates the
 * "jobCounters" element via verifyHsJobCounters.
 */
@Test
public void testJobCountersSlash() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("counters/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject countersJson = body.getJSONObject("jobCounters");
    verifyHsJobCounters(countersJson, appContext.getJob(jid));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Requests the jobs list as JSON, expects exactly one job, and
 * validates its partial view via VerifyJobsUtils.verifyHsJobPartial.
 */
@Test
public void testJobs() throws JSONException, Exception {
  WebResource web = resource();
  ClientResponse resp = web.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArray = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 1, jobArray.length());
  JSONObject jobJson = jobArray.getJSONObject(0);
  Job job = appContext.getPartialJob(MRApps.toJobID(jobJson.getString("id")));
  VerifyJobsUtils.verifyHsJobPartial(jobJson, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job's AM attempts as XML, checks there is exactly one
 * "jobAttempts" wrapper element, and validates the "jobAttempt" nodes
 * via verifyHsJobAttemptsXML.
 */
@Test
public void testJobAttemptsXML() throws Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr).path("jobattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
    String payload = resp.getEntity(String.class);
    // Parse the XML response into a DOM tree for inspection.
    DocumentBuilder builder =
        DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document dom = builder.parse(new InputSource(new StringReader(payload)));
    NodeList wrappers = dom.getElementsByTagName("jobAttempts");
    assertEquals("incorrect number of elements", 1, wrappers.getLength());
    NodeList attempts = dom.getElementsByTagName("jobAttempt");
    verifyHsJobAttemptsXML(attempts, appContext.getJob(jid));
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests the jobs list as XML, expects one "jobs" wrapper containing
 * exactly one "job" element, and validates it via verifyHsJobPartialXML.
 */
@Test
public void testJobsXML() throws Exception {
  WebResource web = resource();
  ClientResponse resp = web.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
  String payload = resp.getEntity(String.class);
  // Parse the XML response into a DOM tree for inspection.
  DocumentBuilder builder =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom = builder.parse(new InputSource(new StringReader(payload)));
  NodeList wrappers = dom.getElementsByTagName("jobs");
  assertEquals("incorrect number of elements", 1, wrappers.getLength());
  NodeList jobNodes = dom.getElementsByTagName("job");
  assertEquals("incorrect number of elements", 1, jobNodes.getLength());
  verifyHsJobPartialXML(jobNodes, appContext);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests each job by id with no Accept header (default media type)
 * and validates the "job" element via VerifyJobsUtils.verifyHsJob.
 */
@Test
public void testJobIdDefault() throws JSONException, Exception {
  WebResource web = resource();
  Map jobs = appContext.getAllJobs();
  for (JobId jid : jobs.keySet()) {
    String jobStr = MRApps.toString(jid);
    ClientResponse resp = web.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobStr)
        .get(ClientResponse.class);
    // Default response type is JSON.
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, body.length());
    JSONObject jobJson = body.getJSONObject("job");
    VerifyJobsUtils.verifyHsJob(jobJson, appContext.getJob(jid));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requests a malformed job id ("job_foo") as JSON and verifies the 404
 * RemoteException payload via verifyJobIdInvalid.
 */
@Test
public void testJobIdInvalid() throws JSONException, Exception {
  WebResource web = resource();
  try {
    web.path("ws").path("v1").path("history").path("mapreduce")
        .path("jobs").path("job_foo")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException expected) {
    ClientResponse resp = expected.getResponse();
    assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
    JSONObject body = resp.getEntity(JSONObject.class);
    JSONObject remoteEx = body.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remoteEx.length());
    String message = remoteEx.getString("message");
    String type = remoteEx.getString("exception");
    String classname = remoteEx.getString("javaClassName");
    verifyJobIdInvalid(message, type, classname);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET jobs/{jobid}/counters with an XML Accept header must return a
 * well-formed document whose "jobCounters" element matches the stored job.
 */
@Test public void testJobCountersXML() throws Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics (raw keySet() won't iterate as
  // JobId).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList info = dom.getElementsByTagName("jobCounters");
    verifyHsJobCountersXML(info, appContext.getJob(id));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * "jobs/" with a trailing slash behaves like "jobs": a single partial job
 * entry is returned and spot-checked against the app context.
 */
@Test public void testJobsSlash() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs/").accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArr = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 1, jobArr.length());
  JSONObject info = jobArr.getJSONObject(0);
  Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
  VerifyJobsUtils.verifyHsJobPartial(info, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Querying with finishedTimeBegin set to "now" must still return all three
 * mock history jobs.
 */
@Test public void testJobsQueryFinishTimeBegin() throws JSONException, Exception {
  Long now = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArr = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 3, jobArr.length());
}
InternalCallVerifier EqualityVerifier
/** Filtering by an unknown user ("bogus") must yield a JSON-null "jobs" entry. */
@Test public void testJobsQueryUserNone() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("user", "bogus")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
}
InternalCallVerifier EqualityVerifier
/** A negative finishedTimeEnd must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryFinishTimeEndNegative() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeEnd", String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: finishedTimeEnd must be greater than 0",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** Filtering by an unknown queue ("bogus") must yield a JSON-null "jobs" entry. */
@Test public void testJobsQueryQueueNonExist() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("queue", "bogus")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A finishedTimeBegin/finishedTimeEnd window capped at the second-highest
 * finish time must exclude exactly the latest-finishing job (size - 1 hits).
 */
@Test public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception {
  WebResource r = resource();
  // Typed collections restore the stripped generics: with raw types,
  // entry.getValue().getReport() and the long assignment below don't compile.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  int size = jobsMap.size();
  ArrayList<Long> finishTime = new ArrayList<Long>(size);
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    finishTime.add(entry.getValue().getReport().getFinishTime());
  }
  Collections.sort(finishTime);
  assertTrue("Error we must have atleast 3 jobs", size >= 3);
  long midFinishTime = finishTime.get(size - 2);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(40000))
      .queryParam("finishedTimeEnd", String.valueOf(midFinishTime))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", size - 1, arr.length());
}
InternalCallVerifier EqualityVerifier
/** A non-numeric finishedTimeEnd must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryFinishTimeEndInvalidformat() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("finishedTimeEnd", "efsd")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** A non-numeric finishedTimeBegin must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryFinishTimeInvalidformat() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("finishedTimeBegin", "efsd")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** A non-numeric startedTimeBegin must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryStartTimeInvalidformat() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("startedTimeBegin", "efsd")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** A non-numeric startedTimeEnd must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryStartTimeEndInvalidformat() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("startedTimeEnd", "efsd")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** limit=2 must truncate the job list to exactly two entries. */
@Test public void testJobsQueryLimit() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("limit", "2")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArr = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 2, jobArr.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * startedTimeEnd set to "now" must still match all three mock history jobs
 * (they all started in the past).
 */
@Test public void testJobsQueryStartTimeEnd() throws JSONException, Exception {
  Long now = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeEnd", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArr = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 3, jobArr.length());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A startedTimeBegin/startedTimeEnd window capped at the second-highest start
 * time must exclude exactly the latest-starting job (size - 1 hits).
 */
@Test public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception {
  WebResource r = resource();
  // Typed collections restore the stripped generics: with raw types,
  // entry.getValue().getReport() and the long assignment below don't compile.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  int size = jobsMap.size();
  ArrayList<Long> startTime = new ArrayList<Long>(size);
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    startTime.add(entry.getValue().getReport().getStartTime());
  }
  Collections.sort(startTime);
  assertTrue("Error we must have atleast 3 jobs", size >= 3);
  long midStartTime = startTime.get(size - 2);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(40000))
      .queryParam("startedTimeEnd", String.valueOf(midStartTime))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", size - 1, arr.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** finishedTimeEnd earlier than finishedTimeBegin must be rejected with 400. */
@Test public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException, Exception {
  Long now = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(now))
      .queryParam("finishedTimeEnd", String.valueOf(40000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: finishedTimeEnd must be greater than finishedTimeBegin",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/**
 * An unknown state value must be rejected with 400 and an
 * IllegalArgumentException naming the bad enum constant.
 */
@Test public void testJobsQueryStateInvalid() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("state", "InvalidState")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringContains("exception message",
      "org.apache.hadoop.mapreduce.v2.api.records.JobState.InvalidState",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "IllegalArgumentException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "java.lang.IllegalArgumentException", err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** A negative startedTimeEnd must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryStartTimeEndNegative() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeEnd", String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: startedTimeEnd must be greater than 0",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/**
 * Filtering by user "mock" must return all three mock history jobs; the
 * first entry is spot-checked against the partial job in the app context.
 */
@Test public void testJobsQueryUser() throws JSONException, Exception {
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("user", "mock")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  // Removed a leftover System.out.println(json) debug statement that was
  // polluting test output.
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", 3, arr.length());
  JSONObject info = arr.getJSONObject(0);
  Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
  VerifyJobsUtils.verifyHsJobPartial(info, job);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Filtering by the state of the first stored job must return exactly that
 * job, verified against the app context's partial job.
 */
@Test public void testJobsQueryState() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics: a raw Map.Entry's getValue()
  // returns Object, so getID()/getState() below would not compile.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  String queryState = "BOGUS";
  JobId jid = null;
  // Sample the first entry only; iteration order is irrelevant here.
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    jid = entry.getValue().getID();
    queryState = entry.getValue().getState().toString();
    break;
  }
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("state", queryState)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject jobs = json.getJSONObject("jobs");
  JSONArray arr = jobs.getJSONArray("job");
  assertEquals("incorrect number of elements", 1, arr.length());
  JSONObject info = arr.getJSONObject(0);
  Job job = appContext.getPartialJob(jid);
  VerifyJobsUtils.verifyHsJobPartial(info, job);
}
InternalCallVerifier EqualityVerifier
/** A negative finishedTimeBegin must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryFinishTimeBeginNegative() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeBegin", String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: finishedTimeBegin must be greater than 0",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** limit=-1 must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryLimitInvalid() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("limit", "-1")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  // "greater then" reproduces the server's message verbatim (typo included).
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: limit value must be greater then 0",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * startedTimeBegin set to "now" matches no stored job, so "jobs" must come
 * back as JSON null.
 */
@Test public void testJobsQueryStartTimeBegin() throws JSONException, Exception {
  Long now = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
}
InternalCallVerifier EqualityVerifier
/** A negative startedTimeBegin must be rejected with 400 BAD_REQUEST. */
@Test public void testJobsQueryStartTimeNegative() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(-1000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: startedTimeBegin must be greater than 0",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Querying for a JobState that no stored job is in must return a JSON-null
 * "jobs" entry.
 */
@Test public void testJobsQueryStateNone() throws JSONException, Exception {
  WebResource r = resource();
  // Typed collections restore the stripped generics (raw get(0) returns
  // Object, not JobState). Also renamed the local away from the
  // constant-style JOB_STATES, which violated local-variable naming.
  ArrayList<JobState> unusedStates = new ArrayList<JobState>(Arrays.asList(JobState.values()));
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
    unusedStates.remove(entry.getValue().getState());
  }
  assertTrue("No unused job states", unusedStates.size() > 0);
  JobState notInUse = unusedStates.get(0);
  ClientResponse response = r.path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("state", notInUse.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * finishedTimeEnd set to "now" matches no stored job, so "jobs" must come
 * back as JSON null.
 */
@Test public void testJobsQueryFinishTimeEnd() throws JSONException, Exception {
  Long now = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("finishedTimeEnd", String.valueOf(now))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** startedTimeEnd earlier than startedTimeBegin must be rejected with 400. */
@Test public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException, Exception {
  Long now = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs")
      .queryParam("startedTimeBegin", String.valueOf(now))
      .queryParam("startedTimeEnd", String.valueOf(40000))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
  assertEquals("incorrect number of elements", 3, err.length());
  WebServicesTestUtils.checkStringMatch("exception message",
      "java.lang.Exception: startedTimeEnd must be greater than startTimeBegin",
      err.getString("message"));
  WebServicesTestUtils.checkStringMatch("exception type", "BadRequestException",
      err.getString("exception"));
  WebServicesTestUtils.checkStringMatch("exception classname",
      "org.apache.hadoop.yarn.webapp.BadRequestException",
      err.getString("javaClassName"));
}
InternalCallVerifier EqualityVerifier
/** Filtering by queue "mockqueue" must return all three mock history jobs. */
@Test public void testJobsQueryQueue() throws JSONException, Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("history")
      .path("mapreduce").path("jobs").queryParam("queue", "mockqueue")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject body = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, body.length());
  JSONArray jobArr = body.getJSONObject("jobs").getJSONArray("job");
  assertEquals("incorrect number of elements", 3, jobArr.length());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A task id with an invalid type letter ("d") under each job must 404 with a
 * NotFoundException describing the malformed TaskId.
 */
@Test public void testTaskIdInvalid() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics (raw keySet() won't iterate as
  // JobId).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_d_000000";
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Bad TaskType identifier. TaskId string : "
              + "task_0_0000_d_000000 is not properly formed.", message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/counters (JSON) must return one "jobTaskCounters"
 * object for every task of every stored job.
 */
@Test public void testTaskIdCounters() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics: raw get(id) returns Object, so
  // getTasks() below would not compile.
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
          .path("counters").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyHsJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Same as testTaskIdCounters but with a trailing slash ("counters/"): the
 * endpoint must behave identically.
 */
@Test public void testTaskIdCountersSlash() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics (raw get(id).getTasks() would
  // not compile).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
          .path("counters/").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("jobTaskCounters");
      verifyHsJobTaskCounters(info, task);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks?type=r must return only the single reduce task of each job.
 */
@Test public void testTasksQueryReduce() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics (raw keySet()/get(id) would not
  // compile with the JobId/Job usage below).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String type = "r";
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("tasks")
        .queryParam("type", type).accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject tasks = json.getJSONObject("tasks");
    JSONArray arr = tasks.getJSONArray("task");
    assertEquals("incorrect number of elements", 1, arr.length());
    verifyHsTask(arr, jobsMap.get(id), type);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * GET .../tasks/{taskid}/counters with an XML Accept header must return a
 * parseable "jobTaskCounters" document for every task of every job.
 */
@Test public void testJobTaskCountersXML() throws Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics (raw get(id).getTasks() would
  // not compile).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
          .path("counters").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder db = dbf.newDocumentBuilder();
      InputSource is = new InputSource();
      is.setCharacterStream(new StringReader(xml));
      Document dom = db.parse(is);
      NodeList info = dom.getElementsByTagName("jobTaskCounters");
      verifyHsTaskCountersXML(info, task);
    }
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A well-formed but non-existent task id under each job must 404 with a
 * NotFoundException naming the missing task.
 */
@Test public void testTaskIdNonExist() throws JSONException, Exception {
  WebResource r = resource();
  // Typed map restores the stripped generics (raw keySet() won't iterate as
  // JobId).
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    String tid = "task_0_0000_m_000000";
    try {
      r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
          .path(jobId).path("tasks").path(tid).get(JSONObject.class);
      fail("should have thrown exception on invalid uri");
    } catch (UniformInterfaceException ue) {
      ClientResponse response = ue.getResponse();
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject msg = response.getEntity(JSONObject.class);
      JSONObject exception = msg.getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, exception.length());
      String message = exception.getString("message");
      String type = exception.getString("exception");
      String classname = exception.getString("javaClassName");
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: task not found with id task_0_0000_m_000000",
          message);
      WebServicesTestUtils.checkStringMatch("exception type",
          "NotFoundException", type);
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// With no Accept header the task counters endpoint must default to JSON;
// each task's counters are validated against the in-memory job model.
@Test public void testTaskIdCountersDefault() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
for ( Task task : jobsMap.get(id).getTasks().values()) {
String tid=MRApps.toString(task.getID());
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
// Response body is a single top-level "jobTaskCounters" object.
assertEquals("incorrect number of elements",1,json.length());
JSONObject info=json.getJSONObject("jobTaskCounters");
verifyHsJobTaskCounters(info,task);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Requests the per-job task list as XML and validates the parsed DOM
// against the job model.
@Test public void testTasksXML() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
// Exactly one <tasks> wrapper element is expected.
NodeList tasks=dom.getElementsByTagName("tasks");
assertEquals("incorrect number of elements",1,tasks.getLength());
NodeList task=dom.getElementsByTagName("task");
verifyHsTaskXML(task,jobsMap.get(id));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A trailing slash on the tasks resource ("tasks/") must behave exactly
// like the plain "tasks" path.
@Test public void testTasksSlash() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject tasks=json.getJSONObject("tasks");
JSONArray arr=tasks.getJSONArray("task");
// The test jobs are set up with two tasks each.
assertEquals("incorrect number of elements",2,arr.length());
verifyHsTask(arr,jobsMap.get(id),null);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A task id missing its trailing task-number component must be rejected
// as "not properly formed" with a 404 and a JSON RemoteException.
@Test public void testTaskIdInvalid3() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
// Truncated id: no task number after the type marker.
String tid="task_0_0000_m";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: TaskId string : " + "task_0_0000_m is not properly formed",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Fetches each known task by id with Accept: application/json and
// validates the single-task payload against the job model.
@Test public void testTaskId() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
for ( Task task : jobsMap.get(id).getTasks().values()) {
String tid=MRApps.toString(task.getID());
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject info=json.getJSONObject("task");
verifyHsSingleTask(info,task);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Same as testTaskId but with no Accept header: the single-task endpoint
// must default to a JSON response.
@Test public void testTaskIdDefault() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
for ( Task task : jobsMap.get(id).getTasks().values()) {
String tid=MRApps.toString(task.getID());
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject info=json.getJSONObject("task");
verifyHsSingleTask(info,task);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// With no Accept header the tasks listing must default to JSON and return
// both tasks of each test job.
@Test public void testTasksDefault() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject tasks=json.getJSONObject("tasks");
JSONArray arr=tasks.getJSONArray("task");
assertEquals("incorrect number of elements",2,arr.length());
verifyHsTask(arr,jobsMap.get(id),null);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A task id missing the application-id component must be rejected as
// "not properly formed" with a 404 and a JSON RemoteException.
@Test public void testTaskIdInvalid2() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
// Malformed id: one underscore-separated field too few.
String tid="task_0000_m_000000";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: TaskId string : " + "task_0000_m_000000 is not properly formed",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Lists tasks with an explicit Accept: application/json header and
// validates both tasks of each test job.
@Test public void testTasks() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject tasks=json.getJSONObject("tasks");
JSONArray arr=tasks.getJSONArray("task");
assertEquals("incorrect number of elements",2,arr.length());
verifyHsTask(arr,jobsMap.get(id),null);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A trailing slash on the single-task resource must behave like the
// plain task-id path.
@Test public void testTaskIdSlash() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
for ( Task task : jobsMap.get(id).getTasks().values()) {
String tid=MRApps.toString(task.getID());
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject info=json.getJSONObject("task");
verifyHsSingleTask(info,task);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Filtering the task list with ?type=m must return only the map task
// (one of the two tasks per test job).
@Test public void testTasksQueryMap() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
String type="m";
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").queryParam("type",type).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject tasks=json.getJSONObject("tasks");
JSONArray arr=tasks.getJSONArray("task");
// Only the map task should survive the filter.
assertEquals("incorrect number of elements",1,arr.length());
verifyHsTask(arr,jobsMap.get(id),type);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Fetches each task by id as XML and validates every <task> element in
// the parsed DOM against the task model.
@Test public void testTaskIdXML() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
for ( Task task : jobsMap.get(id).getTasks().values()) {
String tid=MRApps.toString(task.getID());
ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("task");
for (int i=0; i < nodes.getLength(); i++) {
Element element=(Element)nodes.item(i);
verifyHsSingleTaskXML(element,task);
}
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A completely bogus task id string must be rejected as "not properly
// formed" with a 404 and a JSON RemoteException.
@Test public void testTaskIdBogus() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
String tid="bogustaskid";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: TaskId string : " + "bogustaskid is not properly formed",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// An unsupported ?type= value (anything other than "m" or "r") must be
// rejected with 400 BAD_REQUEST and a BadRequestException payload.
@Test public void testTasksQueryInvalid() throws JSONException, Exception {
WebResource r=resource();
Map jobsMap=appContext.getAllJobs();
for ( JobId id : jobsMap.keySet()) {
String jobId=MRApps.toString(id);
// "reduce" spelled out is invalid; only the single letters m/r are accepted.
String tasktype="reduce";
try {
r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").queryParam("type",tasktype).accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid uri");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: tasktype must be either m or r",message);
WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname);
}
}
}
InternalCallVerifier EqualityVerifier
// The tracking-URI plugin must map an application id to the history
// server's /jobhistory/job/<job-id> page, using the configured web address.
@Test public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
final String webAppAddress="example.net:424242";
YarnConfiguration conf=new YarnConfiguration();
conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,webAppAddress);
MapReduceTrackingUriPlugin plugin=new MapReduceTrackingUriPlugin();
plugin.setConf(conf);
ApplicationId appId=ApplicationId.newInstance(6384623l,5);
// The job id is the application id with its prefix swapped.
String jobSuffix=appId.toString().replaceFirst("^application_","job_");
URI expected=new URI("http://" + webAppAddress + "/jobhistory/job/"+ jobSuffix);
assertEquals(expected,plugin.getTrackingUri(appId));
}
InternalCallVerifier EqualityVerifier
// Builds a mocked job with two successful reduce attempts and checks
// JobInfo's average reduce time. With the stubbed timings below the
// reduce phase (finish - sortFinish) is 8-6=2 and 42-22=20, so the
// expected average is (2+20)/2 = 11.
@Test public void testAverageReduceTime(){
Job job=mock(CompletedJob.class);
final Task task1=mock(Task.class);
final Task task2=mock(Task.class);
JobId jobId=MRBuilderUtils.newJobId(1L,1,1);
final TaskId taskId1=MRBuilderUtils.newTaskId(jobId,1,TaskType.REDUCE);
final TaskId taskId2=MRBuilderUtils.newTaskId(jobId,2,TaskType.REDUCE);
final TaskAttemptId taskAttemptId1=MRBuilderUtils.newTaskAttemptId(taskId1,1);
final TaskAttemptId taskAttemptId2=MRBuilderUtils.newTaskAttemptId(taskId2,2);
final TaskAttempt taskAttempt1=mock(TaskAttempt.class);
final TaskAttempt taskAttempt2=mock(TaskAttempt.class);
JobReport jobReport=mock(JobReport.class);
// Attempt 1: launch 0, shuffle done 4, sort done 6, finish 8.
when(taskAttempt1.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
when(taskAttempt1.getLaunchTime()).thenReturn(0L);
when(taskAttempt1.getShuffleFinishTime()).thenReturn(4L);
when(taskAttempt1.getSortFinishTime()).thenReturn(6L);
when(taskAttempt1.getFinishTime()).thenReturn(8L);
// Attempt 2: launch 5, shuffle done 10, sort done 22, finish 42.
when(taskAttempt2.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
when(taskAttempt2.getLaunchTime()).thenReturn(5L);
when(taskAttempt2.getShuffleFinishTime()).thenReturn(10L);
when(taskAttempt2.getSortFinishTime()).thenReturn(22L);
when(taskAttempt2.getFinishTime()).thenReturn(42L);
when(task1.getType()).thenReturn(TaskType.REDUCE);
when(task2.getType()).thenReturn(TaskType.REDUCE);
when(task1.getAttempts()).thenReturn(new HashMap(){
{
put(taskAttemptId1,taskAttempt1);
}
}
);
when(task2.getAttempts()).thenReturn(new HashMap(){
{
put(taskAttemptId2,taskAttempt2);
}
}
);
when(job.getTasks()).thenReturn(new HashMap(){
{
put(taskId1,task1);
put(taskId2,task2);
}
}
);
when(job.getID()).thenReturn(jobId);
when(job.getReport()).thenReturn(jobReport);
when(job.getName()).thenReturn("TestJobInfo");
when(job.getState()).thenReturn(JobState.SUCCEEDED);
JobInfo jobInfo=new JobInfo(job);
Assert.assertEquals(11L,jobInfo.getAvgReduceTime().longValue());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Loads a recorded .jhist file from test resources and checks the
// average merge time computed by JobInfo for the completed job.
@Test(timeout=10000) public void testAverageMergeTime() throws IOException {
String historyFileName="job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
String confFileName="job_1329348432655_0001_conf.xml";
Configuration conf=new Configuration();
JobACLsManager jobAclsMgr=new JobACLsManager(conf);
ClassLoader loader=TestJobHistoryEntities.class.getClassLoader();
Path fullHistoryPath=new Path(loader.getResource(historyFileName).getFile());
Path fullConfPath=new Path(loader.getResource(confFileName).getFile());
// The conf file is served through a mocked HistoryFileInfo.
HistoryFileInfo info=mock(HistoryFileInfo.class);
when(info.getConfFile()).thenReturn(fullConfPath);
JobId jobId=MRBuilderUtils.newJobId(1329348432655l,1,1);
CompletedJob completedJob=new CompletedJob(conf,jobId,fullHistoryPath,true,"user",info,jobAclsMgr);
JobInfo jobInfo=new JobInfo(completedJob);
Assert.assertEquals(50L,jobInfo.getAvgMergeTime().longValue());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Index info decoded from a legacy (pre-queue-name) history file name
// must preserve all old fields and report a null queue name.
@Test public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
JobID oldJobId=JobID.forName(JOB_ID);
JobId jobId=TypeConverter.toYarn(oldJobId);
long submitTime=Long.parseLong(SUBMIT_TIME);
long finishTime=Long.parseLong(FINISH_TIME);
int numMaps=Integer.parseInt(NUM_MAPS);
int numReduces=Integer.parseInt(NUM_REDUCES);
String legacyFileName=String.format(OLD_JOB_HISTORY_FILE_FORMATTER,JOB_ID,SUBMIT_TIME,USER_NAME,JOB_NAME,FINISH_TIME,NUM_MAPS,NUM_REDUCES,JOB_STATUS);
JobIndexInfo parsed=FileNameIndexUtils.getIndexInfo(legacyFileName);
Assert.assertEquals("Job id incorrect after decoding old history file",jobId,parsed.getJobId());
Assert.assertEquals("Submit time incorrect after decoding old history file",submitTime,parsed.getSubmitTime());
Assert.assertEquals("User incorrect after decoding old history file",USER_NAME,parsed.getUser());
Assert.assertEquals("Job name incorrect after decoding old history file",JOB_NAME,parsed.getJobName());
Assert.assertEquals("Finish time incorrect after decoding old history file",finishTime,parsed.getFinishTime());
Assert.assertEquals("Num maps incorrect after decoding old history file",numMaps,parsed.getNumMaps());
Assert.assertEquals("Num reduces incorrect after decoding old history file",numReduces,parsed.getNumReduces());
Assert.assertEquals("Job status incorrect after decoding old history file",JOB_STATUS,parsed.getJobStatus());
// Legacy names carry no queue, so decoding must leave it unset.
Assert.assertNull("Queue name incorrect after decoding old history file",parsed.getQueueName());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// getDoneFileName() followed by getIndexInfo() must be a lossless round
// trip for every indexed field.
@Test public void testEncodingDecodingEquivalence() throws IOException {
JobIndexInfo original=new JobIndexInfo();
JobID oldJobId=JobID.forName(JOB_ID);
original.setJobId(TypeConverter.toYarn(oldJobId));
original.setSubmitTime(Long.parseLong(SUBMIT_TIME));
original.setUser(USER_NAME);
original.setJobName(JOB_NAME);
original.setFinishTime(Long.parseLong(FINISH_TIME));
original.setNumMaps(Integer.parseInt(NUM_MAPS));
original.setNumReduces(Integer.parseInt(NUM_REDUCES));
original.setJobStatus(JOB_STATUS);
original.setQueueName(QUEUE_NAME);
original.setJobStartTime(Long.parseLong(JOB_START_TIME));
String encodedName=FileNameIndexUtils.getDoneFileName(original);
JobIndexInfo decoded=FileNameIndexUtils.getIndexInfo(encodedName);
Assert.assertEquals("Job id different after encoding and decoding",original.getJobId(),decoded.getJobId());
Assert.assertEquals("Submit time different after encoding and decoding",original.getSubmitTime(),decoded.getSubmitTime());
Assert.assertEquals("User different after encoding and decoding",original.getUser(),decoded.getUser());
Assert.assertEquals("Job name different after encoding and decoding",original.getJobName(),decoded.getJobName());
Assert.assertEquals("Finish time different after encoding and decoding",original.getFinishTime(),decoded.getFinishTime());
Assert.assertEquals("Num maps different after encoding and decoding",original.getNumMaps(),decoded.getNumMaps());
Assert.assertEquals("Num reduces different after encoding and decoding",original.getNumReduces(),decoded.getNumReduces());
Assert.assertEquals("Job status different after encoding and decoding",original.getJobStatus(),decoded.getJobStatus());
Assert.assertEquals("Queue name different after encoding and decoding",original.getQueueName(),decoded.getQueueName());
Assert.assertEquals("Job start time different after encoding and decoding",original.getJobStartTime(),decoded.getJobStartTime());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// A user name containing the history-file delimiter must appear
// percent-escaped inside the generated done-file name.
@Test public void testUserNamePercentEncoding() throws IOException {
JobIndexInfo indexInfo=new JobIndexInfo();
indexInfo.setJobId(TypeConverter.toYarn(JobID.forName(JOB_ID)));
indexInfo.setSubmitTime(Long.parseLong(SUBMIT_TIME));
indexInfo.setUser(USER_NAME_WITH_DELIMITER);
indexInfo.setJobName(JOB_NAME);
indexInfo.setFinishTime(Long.parseLong(FINISH_TIME));
indexInfo.setNumMaps(Integer.parseInt(NUM_MAPS));
indexInfo.setNumReduces(Integer.parseInt(NUM_REDUCES));
indexInfo.setJobStatus(JOB_STATUS);
indexInfo.setQueueName(QUEUE_NAME);
indexInfo.setJobStartTime(Long.parseLong(JOB_START_TIME));
String encodedName=FileNameIndexUtils.getDoneFileName(indexInfo);
Assert.assertTrue("User name not encoded correctly into job history file",encodedName.contains(USER_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// A queue name containing the history-file delimiter must appear
// percent-escaped inside the generated done-file name.
@Test public void testQueueNamePercentEncoding() throws IOException {
JobIndexInfo indexInfo=new JobIndexInfo();
indexInfo.setJobId(TypeConverter.toYarn(JobID.forName(JOB_ID)));
indexInfo.setSubmitTime(Long.parseLong(SUBMIT_TIME));
indexInfo.setUser(USER_NAME);
indexInfo.setJobName(JOB_NAME);
indexInfo.setFinishTime(Long.parseLong(FINISH_TIME));
indexInfo.setNumMaps(Integer.parseInt(NUM_MAPS));
indexInfo.setNumReduces(Integer.parseInt(NUM_REDUCES));
indexInfo.setJobStatus(JOB_STATUS);
indexInfo.setQueueName(QUEUE_NAME_WITH_DELIMITER);
indexInfo.setJobStartTime(Long.parseLong(JOB_START_TIME));
String encodedName=FileNameIndexUtils.getDoneFileName(indexInfo);
Assert.assertTrue("Queue name not encoded correctly into job history file",encodedName.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// A job name containing the history-file delimiter must appear
// percent-escaped inside the generated done-file name.
@Test public void testJobNamePercentEncoding() throws IOException {
JobIndexInfo indexInfo=new JobIndexInfo();
indexInfo.setJobId(TypeConverter.toYarn(JobID.forName(JOB_ID)));
indexInfo.setSubmitTime(Long.parseLong(SUBMIT_TIME));
indexInfo.setUser(USER_NAME);
indexInfo.setJobName(JOB_NAME_WITH_DELIMITER);
indexInfo.setFinishTime(Long.parseLong(FINISH_TIME));
indexInfo.setNumMaps(Integer.parseInt(NUM_MAPS));
indexInfo.setNumReduces(Integer.parseInt(NUM_REDUCES));
indexInfo.setJobStatus(JOB_STATUS);
indexInfo.setQueueName(QUEUE_NAME);
indexInfo.setJobStartTime(Long.parseLong(JOB_START_TIME));
String encodedName=FileNameIndexUtils.getDoneFileName(indexInfo);
Assert.assertTrue("Job name not encoded correctly into job history file",encodedName.contains(JOB_NAME_WITH_DELIMITER_ESCAPE));
}
InternalCallVerifier EqualityVerifier
// Decodes a reduce task id string and checks every component, then
// decodes a map task id and checks only the type.
@Test(timeout=120000) public void testToTaskID(){
TaskId reduceTask=MRApps.toTaskID("task_1_2_r_3");
JobId jobId=reduceTask.getJobId();
assertEquals(1,jobId.getAppId().getClusterTimestamp());
assertEquals(2,jobId.getAppId().getId());
assertEquals(2,jobId.getId());
assertEquals(TaskType.REDUCE,reduceTask.getTaskType());
assertEquals(3,reduceTask.getId());
TaskId mapTask=MRApps.toTaskID("task_1_2_m_3");
assertEquals(TaskType.MAP,mapTask.getTaskType());
}
InternalCallVerifier EqualityVerifier
// Decodes a job id string and checks each component of the result.
@Test(timeout=120000) public void testToJobID(){
JobId parsed=MRApps.toJobID("job_1_1");
ApplicationId appId=parsed.getAppId();
assertEquals(1,appId.getClusterTimestamp());
assertEquals(1,appId.getId());
assertEquals(1,parsed.getId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// getJobFile() must build the per-user staging path:
// <staging>/<user>/.staging/<job-id>/job.xml
@Test(timeout=120000) public void testGetJobFileWithUser(){
Configuration conf=new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR,"/my/path/to/staging");
JobID jobID=new JobID("dummy-job",12345);
String jobFile=MRApps.getJobFile(conf,"dummy-user",jobID);
assertNotNull("getJobFile results in null.",jobFile);
String expected="/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml";
assertEquals("jobFile with specified user is not as expected.",expected,jobFile);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// A blank property list yields null; a populated list yields a string
// containing exactly the requested property names and nothing else.
@Test public void testLogSystemProperties() throws Exception {
Configuration conf=new Configuration();
conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG," ");
assertNull(MRApps.getSystemPropertiesToLog(conf));
String classpathKey="java.class.path";
String osKey="os.name";
String versionKey="java.version";
conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG,classpathKey + ", " + osKey);
String logged=MRApps.getSystemPropertiesToLog(conf);
assertNotNull(logged);
assertTrue(logged.contains(classpathKey));
assertTrue(logged.contains(osKey));
// A property that was not requested must not be logged.
assertFalse(logged.contains(versionKey));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Registers one cache archive and one cache file (with fragment rename)
// on a mocked filesystem, runs setupDistributedCache(), and checks the
// resulting LocalResource entries (size, timestamp, type).
@SuppressWarnings("deprecation") @Test(timeout=30000) public void testSetupDistributedCache() throws Exception {
Configuration conf=new Configuration();
conf.setClass("fs.mockfs.impl",MockFileSystem.class,FileSystem.class);
URI mockUri=URI.create("mockfs://mock/");
FileSystem mockFs=((FilterFileSystem)FileSystem.get(mockUri,conf)).getRawFileSystem();
URI archive=new URI("mockfs://mock/tmp/something.zip");
Path archivePath=new Path(archive);
// "#something" renames the file to "something" in the cache.
URI file=new URI("mockfs://mock/tmp/something.txt#something");
Path filePath=new Path(file);
when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
when(mockFs.resolvePath(filePath)).thenReturn(filePath);
DistributedCache.addCacheArchive(archive,conf);
conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS,"10");
conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES,"10");
conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES,"true");
DistributedCache.addCacheFile(file,conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS,"11");
conf.set(MRJobConfig.CACHE_FILES_SIZES,"11");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES,"true");
Map localResources=new HashMap();
MRApps.setupDistributedCache(conf,localResources);
// One entry per cache item: the archive and the renamed file.
assertEquals(2,localResources.size());
LocalResource lr=localResources.get("something.zip");
assertNotNull(lr);
assertEquals(10l,lr.getSize());
assertEquals(10l,lr.getTimestamp());
assertEquals(LocalResourceType.ARCHIVE,lr.getType());
lr=localResources.get("something");
assertNotNull(lr);
assertEquals(11l,lr.getSize());
assertEquals(11l,lr.getTimestamp());
assertEquals(LocalResourceType.FILE,lr.getType());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// With cross-platform submission enabled, the generated CLASSPATH must
// start with $PWD and include both the YARN and MapReduce application
// classpaths (commas normalized to the platform-independent separator).
@Test(timeout=120000) public void testSetClasspath() throws IOException {
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=Job.getInstance(conf);
Map environment=new HashMap();
MRApps.setClasspath(environment,job.getConfiguration());
assertTrue(environment.get("CLASSPATH").startsWith(ApplicationConstants.Environment.PWD.$$() + ApplicationConstants.CLASS_PATH_SEPARATOR));
String yarnAppClasspath=job.getConfiguration().get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,StringUtils.join(",",YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (yarnAppClasspath != null) {
// Config lists entries comma-separated; CLASSPATH uses the separator token.
yarnAppClasspath=yarnAppClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(yarnAppClasspath));
String mrAppClasspath=job.getConfiguration().get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,MRJobConfig.DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH);
if (mrAppClasspath != null) {
mrAppClasspath=mrAppClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(mrAppClasspath));
}
InternalCallVerifier EqualityVerifier
// MRApps.toString(TaskId) must render the canonical task id string for
// both MAP and REDUCE task types.
// (Removed leftover System.err debug prints and a dead local variable
// that asserted nothing.)
@Test(timeout=120000) public void testTaskIDtoString(){
TaskId tid=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
tid.getJobId().setAppId(ApplicationId.newInstance(0,0));
tid.setTaskType(TaskType.MAP);
assertEquals("task_0_0000_m_000000",MRApps.toString(tid));
// Switching the task type must be reflected in the rendered string.
tid.setTaskType(TaskType.REDUCE);
assertEquals("task_0_0000_r_000000",MRApps.toString(tid));
}
InternalCallVerifier EqualityVerifier
// Assembles a TaskAttemptId record bottom-up (job -> task -> attempt)
// and verifies its canonical string rendering.
@Test(timeout=120000) public void testTaskAttemptIDtoString(){
TaskAttemptId attemptId=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
TaskId taskId=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
JobId jobId=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
jobId.setAppId(ApplicationId.newInstance(0,0));
taskId.setTaskType(TaskType.MAP);
taskId.setJobId(jobId);
attemptId.setTaskId(taskId);
assertEquals("attempt_0_0000_m_000000_0",MRApps.toString(attemptId));
}
InternalCallVerifier EqualityVerifier
// Builds an empty JobId record and verifies its canonical string form.
@Test(timeout=120000) public void testJobIDtoString(){
JobId jobId=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
ApplicationId appId=ApplicationId.newInstance(0,0);
jobId.setAppId(appId);
assertEquals("job_0_0000",MRApps.toString(jobId));
}
InternalCallVerifier EqualityVerifier
// Decodes an attempt id string and verifies every level of the id
// hierarchy (app, job, task, attempt).
@Test(timeout=120000) public void testToTaskAttemptID(){
TaskAttemptId attemptId=MRApps.toTaskAttemptID("attempt_0_1_m_2_3");
TaskId taskId=attemptId.getTaskId();
JobId jobId=taskId.getJobId();
assertEquals(0,jobId.getAppId().getClusterTimestamp());
assertEquals(1,jobId.getAppId().getId());
assertEquals(1,jobId.getId());
assertEquals(2,taskId.getId());
assertEquals(3,attemptId.getId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// A cache archive registered with a "#testTGZ" fragment must show up in
// the generated CLASSPATH alongside the framework classpath entries.
@Test(timeout=120000) public void testSetClasspathWithArchives() throws IOException {
// Minimal one-byte stand-in archive on local disk.
File testTGZ=new File(testWorkDir,"test.tgz");
FileOutputStream out=new FileOutputStream(testTGZ);
out.write(0);
out.close();
Configuration conf=new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
Job job=Job.getInstance(conf);
conf=job.getConfiguration();
String testTGZQualifiedPath=FileSystem.getLocal(conf).makeQualified(new Path(testTGZ.getAbsolutePath())).toString();
conf.set(MRJobConfig.CLASSPATH_ARCHIVES,testTGZQualifiedPath);
conf.set(MRJobConfig.CACHE_ARCHIVES,testTGZQualifiedPath + "#testTGZ");
Map environment=new HashMap();
MRApps.setClasspath(environment,conf);
assertTrue(environment.get("CLASSPATH").startsWith(ApplicationConstants.Environment.PWD.$$() + ApplicationConstants.CLASS_PATH_SEPARATOR));
String confClasspath=job.getConfiguration().get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,StringUtils.join(",",YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (confClasspath != null) {
// Config lists entries comma-separated; CLASSPATH uses the separator token.
confClasspath=confClasspath.replaceAll(",\\s*",ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(confClasspath));
// The archive must appear under its fragment alias.
assertTrue(environment.get("CLASSPATH").contains("testTGZ"));
}
InternalCallVerifier BooleanVerifier
@Test public void testCloseShouldCloseTheSocketWhichIsCreatedByInit() throws Exception {
  // init() opens the context's datagram socket; close() must close it.
  GangliaContext gangliaContext=new GangliaContext();
  gangliaContext.init("gangliaContext",ContextFactory.getFactory());
  assertFalse("Socket already closed",gangliaContext.datagramSocket.isClosed());
  gangliaContext.close();
  assertTrue("Socket not closed",gangliaContext.datagramSocket.isClosed());
}
InternalCallVerifier EqualityVerifier
@Test public void testPutMetrics(){
  // GraphiteSink should emit one line per metric in the form
  // "<prefix>.<context>.<tags>.<name> <value> <timestamp/1000>\n".
  // The metrics come from a HashSet, so either emission order is acceptable.
  GraphiteSink sink=new GraphiteSink();
  List tags=new ArrayList();
  tags.add(new MetricsTag(MsInfo.Context,"all"));
  tags.add(new MetricsTag(MsInfo.Hostname,"host"));
  Set metrics=new HashSet();
  metrics.add(makeMetric("foo1",1.25));
  metrics.add(makeMetric("foo2",2.25));
  MetricsRecord record=new MetricsRecordImpl(MsInfo.Context,(long)10000,tags,metrics);
  OutputStreamWriter mockWriter=mock(OutputStreamWriter.class);
  ArgumentCaptor argument=ArgumentCaptor.forClass(String.class);
  Whitebox.setInternalState(sink,"writer",mockWriter);
  sink.putMetrics(record);
  try {
    verify(mockWriter).write(argument.capture());
  }
  catch ( IOException e) {
    // A write failure is a test failure; previously this was only logged
    // with printStackTrace(), letting the test continue on a broken state.
    throw new AssertionError("write() should not have thrown",e);
  }
  String result=argument.getValue().toString();
  // assertTrue instead of assertEquals(true, ...): clearer intent and failure.
  assertTrue(result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" + "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") || result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" + "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
}
InternalCallVerifier EqualityVerifier
@Test public void testPutMetrics2(){
  // Same as testPutMetrics, but with a null Hostname tag value: the tag
  // must simply be omitted from the emitted metric path.
  GraphiteSink sink=new GraphiteSink();
  List tags=new ArrayList();
  tags.add(new MetricsTag(MsInfo.Context,"all"));
  tags.add(new MetricsTag(MsInfo.Hostname,null));
  Set metrics=new HashSet();
  metrics.add(makeMetric("foo1",1));
  metrics.add(makeMetric("foo2",2));
  MetricsRecord record=new MetricsRecordImpl(MsInfo.Context,(long)10000,tags,metrics);
  OutputStreamWriter mockWriter=mock(OutputStreamWriter.class);
  ArgumentCaptor argument=ArgumentCaptor.forClass(String.class);
  Whitebox.setInternalState(sink,"writer",mockWriter);
  sink.putMetrics(record);
  try {
    verify(mockWriter).write(argument.capture());
  }
  catch ( IOException e) {
    // A write failure is a test failure; previously this was only logged
    // with printStackTrace(), letting the test continue on a broken state.
    throw new AssertionError("write() should not have thrown",e);
  }
  String result=argument.getValue().toString();
  // HashSet iteration order is unspecified, so accept either metric order.
  assertTrue(result.equals("null.all.Context.Context=all.foo1 1 10\n" + "null.all.Context.Context=all.foo2 2 10\n") || result.equals("null.all.Context.Context=all.foo2 2 10\n" + "null.all.Context.Context=all.foo1 1 10\n"));
}
InternalCallVerifier EqualityVerifier
// A per-metric glob filter excluding "foo" must drop the gauge named "foo"
// while keeping the tag named "foo" (tags are not metric-filtered) and the
// counter c0.
@Test public void testPerMetricFiltering(){
SubsetConfiguration fc=new ConfigBuilder().add("p.exclude","foo").subset("p");
MetricsCollectorImpl mb=new MetricsCollectorImpl();
mb.setMetricFilter(newGlobFilter(fc));
MetricsRecordBuilderImpl rb=mb.addRecord("foo");
rb.tag(info("foo",""),"").addCounter(info("c0",""),0).addGauge(info("foo",""),1);
assertEquals("1 tag",1,rb.tags().size());
assertEquals("1 metric",1,rb.metrics().size());
assertEquals("expect foo tag","foo",rb.tags().get(0).name());
assertEquals("expect c0","c0",rb.metrics().get(0).name());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// When the record-level filter excludes the record name, the builder becomes
// a no-op: tag/gauge additions are dropped, getRecord() is null, and no
// record is collected.
@Test public void recordBuilderShouldNoOpIfFiltered(){
SubsetConfiguration fc=new ConfigBuilder().add("p.exclude","foo").subset("p");
MetricsCollectorImpl mb=new MetricsCollectorImpl();
mb.setRecordFilter(newGlobFilter(fc));
MetricsRecordBuilderImpl rb=mb.addRecord("foo");
rb.tag(info("foo",""),"value").addGauge(info("g0",""),1);
assertEquals("no tags",0,rb.tags().size());
assertEquals("no metrics",0,rb.metrics().size());
assertNull("null record",rb.getRecord());
assertEquals("no records",0,mb.getRecords().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A source exposed through MetricsSourceAdapter must report consistent values
// via getMetrics() and via the JMX attribute ("C1"), before and after the
// counter is incremented.
@Test public void testGetMetricsAndJmx() throws Exception {
TestSource source=new TestSource("test");
MetricsSourceBuilder sb=MetricsAnnotations.newSourceBuilder(source);
final MetricsSource s=sb.build();
List injectedTags=new ArrayList();
MetricsSourceAdapter sa=new MetricsSourceAdapter("test","test","test desc",s,injectedTags,null,null,1,false);
MetricsCollectorImpl builder=new MetricsCollectorImpl();
Iterable metricsRecords=sa.getMetrics(builder,true);
MetricsRecordImpl metricsRecord=metricsRecords.iterator().next();
// Fresh source: first metric value is zero.
assertEquals(0L,metricsRecord.metrics().iterator().next().value().longValue());
// NOTE(review): the sleeps presumably let the adapter's JMX attribute cache
// refresh (the constructor's "1" looks like a cache TTL) -- confirm before
// removing them.
Thread.sleep(100);
assertEquals(0L,(Number)sa.getAttribute("C1"));
source.incrementCnt();
builder=new MetricsCollectorImpl();
metricsRecords=sa.getMetrics(builder,true);
metricsRecord=metricsRecords.iterator().next();
assertTrue(metricsRecord.metrics().iterator().hasNext());
Thread.sleep(100);
// JMX must observe the increment.
assertEquals(1L,(Number)sa.getAttribute("C1"));
}
InternalCallVerifier NullVerifier
@Test public void testUnregisterSource(){
  // unregisterSource removes only the named source; others stay registered.
  MetricsSystem metricsSystem=new MetricsSystemImpl();
  TestSource sourceOne=new TestSource("ts1");
  TestSource sourceTwo=new TestSource("ts2");
  metricsSystem.register("ts1","",sourceOne);
  metricsSystem.register("ts2","",sourceTwo);
  assertNotNull(metricsSystem.getSource("ts1"));
  metricsSystem.unregisterSource("ts1");
  assertNull(metricsSystem.getSource("ts1"));
  assertNotNull(metricsSystem.getSource("ts2"));
  metricsSystem.shutdown();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testMultiThreadedPublish() throws Exception {
  // Ten threads each own one source/sink slot. All threads line up on
  // barrier1, publish simultaneously, line up on barrier2, then verify that
  // exactly their own value (230) was collected and nothing was dropped.
  final int numThreads=10;
  new ConfigBuilder().add("*.period",80).add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY,numThreads).save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
  final MetricsSystemImpl ms=new MetricsSystemImpl("Test");
  ms.start();
  final CollectingSink sink=new CollectingSink(numThreads);
  ms.registerSink("collector","Collector of values from all threads.",sink);
  final TestSource[] sources=new TestSource[numThreads];
  final Thread[] threads=new Thread[numThreads];
  final String[] results=new String[numThreads];
  final CyclicBarrier barrier1=new CyclicBarrier(numThreads), barrier2=new CyclicBarrier(numThreads);
  for (int i=0; i < numThreads; i++) {
    sources[i]=ms.register("threadSource" + i,"A source of my threaded goodness.",new TestSource("threadSourceRec" + i));
    threads[i]=new Thread(new Runnable(){
      // Await the given barrier, recording a failure reason in results[]
      // and returning false on any interruption/breakage/timeout.
      private boolean safeAwait( int mySource, CyclicBarrier barrier){
        try {
          // BUG FIX: await the barrier passed in -- the original always
          // awaited barrier1, even when called with barrier2.
          barrier.await(2,TimeUnit.SECONDS);
        }
        catch ( InterruptedException e) {
          results[mySource]="Interrupted";
          return false;
        }
        catch ( BrokenBarrierException e) {
          results[mySource]="Broken Barrier";
          return false;
        }
        catch ( TimeoutException e) {
          results[mySource]="Timed out on barrier";
          return false;
        }
        return true;
      }
      @Override public void run(){
        // The thread's name (set below) encodes its slot index.
        int mySource=Integer.parseInt(Thread.currentThread().getName());
        if (sink.collected[mySource].get() != 0L) {
          results[mySource]="Someone else collected my metric!";
          return;
        }
        if (!safeAwait(mySource,barrier1)) return;
        sources[mySource].g1.set(230);
        ms.publishMetricsNow();
        if (!safeAwait(mySource,barrier2)) return;
        if (sink.collected[mySource].get() != 230L) {
          results[mySource]="Metric not collected!";
          return;
        }
        results[mySource]="Passed";
      }
    }
    ,"" + i);
  }
  for ( Thread t : threads) t.start();
  for ( Thread t : threads) t.join();
  assertEquals(0L,ms.droppedPubAll.value());
  // Every thread must report "Passed"; on failure, dump all results.
  assertTrue(StringUtils.join("\n",Arrays.asList(results)),Iterables.all(Arrays.asList(results),new Predicate(){
    @Override public boolean apply( @Nullable String input){
      return input.equalsIgnoreCase("Passed");
    }
  }
  ));
  ms.stop();
  ms.shutdown();
}
BranchVerifier InternalCallVerifier EqualityVerifier
// Publish once, then stop immediately: each sink may have received anywhere
// from 0 to 2 records, but whatever was delivered must be well-formed, and
// both sinks must agree when both received something.
@Test public void testInitFirstVerifyStopInvokedImmediately() throws Exception {
DefaultMetricsSystem.shutdown();
// Test config: sink class plus filters -- source s0 is excluded entirely,
// s1 drops metrics matching X*, both sinks drop metrics matching Y*.
new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.*.source.filter.exclude","s0").add("test.source.s1.metric.filter.exclude","X*").add("test.sink.sink1.metric.filter.exclude","Y*").add("test.sink.sink2.metric.filter.exclude","Y*").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
ms.register("s0","s0 desc",new TestSource("s0rec"));
TestSource s1=ms.register("s1","s1 desc",new TestSource("s1rec"));
s1.c1.incr();
s1.xxx.incr();
s1.g1.set(2);
s1.yyy.incr(2);
s1.s1.add(0);
MetricsSink sink1=mock(MetricsSink.class);
MetricsSink sink2=mock(MetricsSink.class);
ms.registerSink("sink1","sink1 desc",sink1);
ms.registerSink("sink2","sink2 desc",sink2);
ms.publishMetricsNow();
ms.stop();
ms.shutdown();
// r1/r2 are ArgumentCaptor fields; atMost(2) because stop() may cut
// delivery short.
verify(sink1,atMost(2)).putMetrics(r1.capture());
List mr1=r1.getAllValues();
verify(sink2,atMost(2)).putMetrics(r2.capture());
List mr2=r2.getAllValues();
if (mr1.size() != 0 && mr2.size() != 0) {
checkMetricsRecords(mr1);
assertEquals("output",mr1,mr2);
}
 else if (mr1.size() != 0) {
checkMetricsRecords(mr1);
}
 else if (mr2.size() != 0) {
checkMetricsRecords(mr2);
}
}
InternalCallVerifier IdentityVerifier NullVerifier HybridVerifier
@Test public void testRegisterDups(){
  // Re-registering under an existing name replaces the source instead of
  // failing; the lookup must then return a different instance.
  MetricsSystem metricsSystem=new MetricsSystemImpl();
  TestSource first=new TestSource("ts1");
  TestSource second=new TestSource("ts2");
  metricsSystem.register("ts1","",first);
  MetricsSource original=metricsSystem.getSource("ts1");
  assertNotNull(original);
  metricsSystem.register("ts1","",second);
  MetricsSource replacement=metricsSystem.getSource("ts1");
  assertNotNull(replacement);
  assertNotSame(original,replacement);
  metricsSystem.shutdown();
}
InternalCallVerifier NullVerifier
@Test public void testStartStopStart(){
  // After a stop/shutdown/start cycle, the registered source's adapter and
  // its MBean must be available again.
  DefaultMetricsSystem.shutdown();
  MetricsSystemImpl metricsSystem=new MetricsSystemImpl("test");
  TestSource source=new TestSource("ts");
  metricsSystem.start();
  metricsSystem.register("ts","",source);
  MetricsSourceAdapter adapter=metricsSystem.getSourceAdapter("ts");
  assertNotNull(adapter);
  assertNotNull(adapter.getMBeanName());
  metricsSystem.stop();
  metricsSystem.shutdown();
  metricsSystem.start();
  adapter=metricsSystem.getSourceAdapter("ts");
  assertNotNull(adapter);
  assertNotNull(adapter.getMBeanName());
  metricsSystem.stop();
  metricsSystem.shutdown();
}
InternalCallVerifier EqualityVerifier
// Sinks registered after metrics were recorded must still receive the
// published records: both sinks get exactly two records (one per unfiltered
// source) and their outputs must match.
@Test public void testInitFirstVerifyCallBacks() throws Exception {
DefaultMetricsSystem.shutdown();
// Same filter setup as testInitFirstVerifyStopInvokedImmediately: s0 is
// excluded, s1 drops X*, both sinks drop Y*.
new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.*.source.filter.exclude","s0").add("test.source.s1.metric.filter.exclude","X*").add("test.sink.sink1.metric.filter.exclude","Y*").add("test.sink.sink2.metric.filter.exclude","Y*").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
ms.register("s0","s0 desc",new TestSource("s0rec"));
TestSource s1=ms.register("s1","s1 desc",new TestSource("s1rec"));
s1.c1.incr();
s1.xxx.incr();
s1.g1.set(2);
s1.yyy.incr(2);
s1.s1.add(0);
MetricsSink sink1=mock(MetricsSink.class);
MetricsSink sink2=mock(MetricsSink.class);
ms.registerSink("sink1","sink1 desc",sink1);
ms.registerSink("sink2","sink2 desc",sink2);
ms.publishMetricsNow();
try {
// Delivery is asynchronous: allow up to 200ms for both putMetrics calls.
verify(sink1,timeout(200).times(2)).putMetrics(r1.capture());
verify(sink2,timeout(200).times(2)).putMetrics(r2.capture());
}
  finally {
ms.stop();
ms.shutdown();
}
List mr1=r1.getAllValues();
List mr2=r2.getAllValues();
checkMetricsRecords(mr1);
assertEquals("output",mr1,mr2);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// A sink that hangs must cause publishes to be dropped (retry count 0 per
// the config below) without being interrupted mid-publish; shutdown then
// interrupts it, and it must still have been called for later records.
@Test public void testHangingSink(){
new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.sink.hanging.retry.delay","1").add("test.sink.hanging.retry.backoff","1.01").add("test.sink.hanging.retry.count","0").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
MetricsSystemImpl ms=new MetricsSystemImpl("Test");
ms.start();
TestSource s=ms.register("s3","s3 desc",new TestSource("s3rec"));
s.c1.incr();
HangingSink hanging=new HangingSink();
ms.registerSink("hanging","Hang the sink!",hanging);
ms.publishMetricsNow();
// The hung sink causes exactly one dropped publish, without interruption.
assertEquals(1L,ms.droppedPubAll.value());
assertFalse(hanging.getInterrupted());
ms.stop();
ms.shutdown();
// Shutdown interrupts the hung sink thread.
assertTrue(hanging.getInterrupted());
assertTrue("The sink didn't get called after its first hang " + "for subsequent records.",hanging.getGotCalledSecondTime());
}
InternalCallVerifier EqualityVerifier
/**
 * Each metric type (int/long counter, int/long/float/double gauge) must
 * dispatch to the matching visitor callback, passing its MetricsInfo and
 * exactly-typed value.
 */
@Test public void testCommon(){
  MetricsVisitor visitor=mock(MetricsVisitor.class);
  // NOTE: the original also created an unused MetricsRegistry here; removed.
  List metrics=MetricsLists.builder("test").addCounter(info("c1","int counter"),1).addCounter(info("c2","long counter"),2L).addGauge(info("g1","int gauge"),5).addGauge(info("g2","long gauge"),6L).addGauge(info("g3","float gauge"),7f).addGauge(info("g4","double gauge"),8d).metrics();
  for ( AbstractMetric metric : metrics) {
    metric.visit(visitor);
  }
  // c1..g4 are ArgumentCaptor fields; check both the dispatched value and
  // the captured MetricsInfo for each metric type.
  verify(visitor).counter(c1.capture(),eq(1));
  assertEquals("c1 name","c1",c1.getValue().name());
  assertEquals("c1 description","int counter",c1.getValue().description());
  verify(visitor).counter(c2.capture(),eq(2L));
  assertEquals("c2 name","c2",c2.getValue().name());
  assertEquals("c2 description","long counter",c2.getValue().description());
  verify(visitor).gauge(g1.capture(),eq(5));
  assertEquals("g1 name","g1",g1.getValue().name());
  assertEquals("g1 description","int gauge",g1.getValue().description());
  verify(visitor).gauge(g2.capture(),eq(6L));
  assertEquals("g2 name","g2",g2.getValue().name());
  assertEquals("g2 description","long gauge",g2.getValue().description());
  verify(visitor).gauge(g3.capture(),eq(7f));
  assertEquals("g3 name","g3",g3.getValue().name());
  assertEquals("g3 description","float gauge",g3.getValue().description());
  verify(visitor).gauge(g4.capture(),eq(8d));
  assertEquals("g4 name","g4",g4.getValue().name());
  assertEquals("g4 description","double gauge",g4.getValue().description());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test concurrent consumer access, which is illegal: while one consumer is
 * active (here, a sleeping consumer created by the helper), every other
 * consumer-side operation must throw ConcurrentModificationException and
 * leave the queue contents untouched.
 * @throws Exception
 */
@Test public void testConcurrentConsumers() throws Exception {
// Capacity-2 queue whose consumer thread is asleep while consuming element 1
// (see newSleepingConsumerQueue -- confirm semantics against the helper).
final SinkQueue q=newSleepingConsumerQueue(2,1);
assertTrue("should enqueue",q.enqueue(2));
assertEquals("queue back",2,(int)q.back());
// Queue is full: a further enqueue is dropped, not blocked.
assertTrue("should drop",!q.enqueue(3));
shouldThrowCME(new Fun(){
@Override public void run(){
q.clear();
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consume(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.consumeAll(null);
}
}
);
shouldThrowCME(new Fun(){
@Override public void run() throws Exception {
q.dequeue();
}
}
);
// The rejected calls must not have modified the queue.
assertEquals("queue size",2,q.size());
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test the consumer throwing exceptions: the exception must propagate to the
 * caller unchanged, and the element must remain in the queue (not consumed).
 * @throws Exception
 */
@Test public void testConsumerException() throws Exception {
final SinkQueue q=new SinkQueue(1);
final RuntimeException ex=new RuntimeException("expected");
q.enqueue(1);
try {
q.consume(new Consumer(){
@Override public void consume( Integer e){
throw ex;
}
}
);
}
 catch ( Exception expected) {
// The very same exception instance must surface.
assertSame("consumer exception",ex,expected);
}
// The failed consume must not remove the element.
assertEquals("queue size",1,q.size());
assertEquals("element",1,(int)q.dequeue());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test common use case: basic FIFO behavior of enqueue/dequeue/consume and
 * the front/back/size views, including the empty-queue state (front and back
 * are null when empty).
 * @throws Exception
 */
@Test public void testCommon() throws Exception {
final SinkQueue q=new SinkQueue(2);
q.enqueue(1);
// With a single element, front and back are the same element.
assertEquals("queue front",1,(int)q.front());
assertEquals("queue back",1,(int)q.back());
assertEquals("element",1,(int)q.dequeue());
assertTrue("should enqueue",q.enqueue(2));
// consume() hands the head element to the consumer.
q.consume(new Consumer(){
@Override public void consume( Integer e){
assertEquals("element",2,(int)e);
}
}
);
assertTrue("should enqueue",q.enqueue(3));
assertEquals("element",3,(int)q.dequeue());
// Empty again: size 0, null front/back.
assertEquals("queue size",0,q.size());
assertEquals("queue front",null,q.front());
assertEquals("queue back",null,q.back());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test nonblocking enqueue when queue is full: the enqueue returns false
 * (element dropped) instead of blocking, and space freed by dequeue/consume
 * can be reused.
 * @throws Exception
 */
@Test public void testFull() throws Exception {
final SinkQueue q=new SinkQueue(1);
q.enqueue(1);
// Capacity 1 and already holding an element: 2 is dropped.
assertTrue("should drop",!q.enqueue(2));
assertEquals("element",1,(int)q.dequeue());
q.enqueue(3);
q.consume(new Consumer(){
@Override public void consume( Integer e){
assertEquals("element",3,(int)e);
}
}
);
assertEquals("queue size",0,q.size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test consumers that take their time: while the consumer is stalled the
 * queue stays full, further enqueues are dropped, and the contents are
 * unchanged.
 * @throws Exception
 */
@Test public void testHangingConsumer() throws Exception {
// Capacity-2 queue pre-filled with 1 and 2, consumer asleep (see
// newSleepingConsumerQueue -- confirm semantics against the helper).
SinkQueue q=newSleepingConsumerQueue(2,1,2);
assertEquals("queue back",2,(int)q.back());
assertTrue("should drop",!q.enqueue(3));
assertEquals("queue size",2,q.size());
assertEquals("queue head",1,(int)q.front());
assertEquals("queue back",2,(int)q.back());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test the consumeAll method: it must visit every queued element exactly
 * once, in FIFO order.
 * @throws Exception
 */
@Test public void testConsumeAll() throws Exception {
final int capacity=64;
final SinkQueue q=new SinkQueue(capacity);
for (int i=0; i < capacity; ++i) {
assertTrue("should enqueue",q.enqueue(i));
}
// Queue is now full; one more enqueue must be rejected.
assertTrue("should not enqueue",!q.enqueue(capacity));
final Runnable trigger=mock(Runnable.class);
q.consumeAll(new Consumer(){
// Elements must arrive in insertion order: 0, 1, ..., capacity-1.
private int expected=0;
@Override public void consume( Integer e){
assertEquals("element",expected++,(int)e);
trigger.run();
}
}
);
// The consumer ran exactly once per element.
verify(trigger,times(capacity)).run();
}
InternalCallVerifier EqualityVerifier
/**
 * Test the clear method: overfilling the queue caps it at capacity (extra
 * enqueues are dropped), and clear() empties it.
 */
@Test public void testClear(){
  final SinkQueue q=new SinkQueue(128);
  int attempts=q.capacity() + 97;
  for (int i=0; i < attempts; ++i) {
    q.enqueue(i);
  }
  assertEquals("queue size",q.capacity(),q.size());
  q.clear();
  assertEquals("queue size",0,q.size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test various factory methods: each newXxx call must create a metric of the
 * matching mutable type, and registering a duplicate name must raise a
 * MetricsException.
 */
@Test public void testNewMetrics(){
final MetricsRegistry r=new MetricsRegistry("test");
r.newCounter("c1","c1 desc",1);
r.newCounter("c2","c2 desc",2L);
r.newGauge("g1","g1 desc",3);
r.newGauge("g2","g2 desc",4L);
r.newStat("s1","s1 desc","ops","time");
assertEquals("num metrics in registry",5,r.metrics().size());
// The initial-value type selects the concrete mutable metric class.
assertTrue("c1 found",r.get("c1") instanceof MutableCounterInt);
assertTrue("c2 found",r.get("c2") instanceof MutableCounterLong);
assertTrue("g1 found",r.get("g1") instanceof MutableGaugeInt);
assertTrue("g2 found",r.get("g2") instanceof MutableGaugeLong);
assertTrue("s1 found",r.get("s1") instanceof MutableStat);
// Duplicate registration must fail with the given message.
expectMetricsException("Metric name c1 already exists",new Runnable(){
@Override public void run(){
r.newCounter("c1","test dup",0);
}
}
);
}
InternalCallVerifier EqualityVerifier
@Test public void testCommonCases(){
  // First use of a name is returned as-is; a repeat gets a "-1" suffix.
  UniqueNames names=new UniqueNames();
  assertEquals("foo",names.uniqueName("foo"));
  assertEquals("foo-1",names.uniqueName("foo"));
}
InternalCallVerifier EqualityVerifier
@Test public void testCollisions(){
  // Explicitly suffixed names that collide with generated ones must receive
  // their own additional suffixes.
  UniqueNames names=new UniqueNames();
  names.uniqueName("foo");
  assertEquals("foo-1",names.uniqueName("foo-1"));
  assertEquals("foo-2",names.uniqueName("foo"));
  assertEquals("foo-1-1",names.uniqueName("foo-1"));
  assertEquals("foo-2-1",names.uniqueName("foo-2"));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// A lookup on an empty cache returns null; after update(), the record is
// retrievable by name + tags and holds the stored metric.
@SuppressWarnings("deprecation") @Test public void testGet(){
MetricsCache cache=new MetricsCache();
assertNull("empty",cache.get("r",Arrays.asList(makeTag("t","t"))));
MetricsRecord mr=makeRecord("r",Arrays.asList(makeTag("t","t")),Arrays.asList(makeMetric("m",1)));
cache.update(mr);
MetricsCache.Record cr=cache.get("r",mr.tags());
LOG.debug("tags=" + mr.tags() + " cr="+ cr);
assertNotNull("Got record",cr);
assertEquals("contains 1 metric",1,cr.metrics().size());
checkMetricValue("new metric value",cr,"m",1);
}
IterativeVerifier BranchVerifier InternalCallVerifier NullVerifier
// Storing more than MAX_RECS_PER_NAME_DEFAULT tag-sets under one record name
// evicts the oldest entry (t0); until the limit is crossed, t0 survives.
@Test public void testOverflow(){
MetricsCache cache=new MetricsCache();
MetricsCache.Record cr;
Collection t0=Arrays.asList(makeTag("t0","0"));
for (int i=0; i < MetricsCache.MAX_RECS_PER_NAME_DEFAULT + 1; ++i) {
// Each iteration uses a distinct tag-set, creating a new cache entry.
cr=cache.update(makeRecord("r",Arrays.asList(makeTag("t" + i,"" + i)),Arrays.asList(makeMetric("m",i))));
checkMetricValue("new metric value",cr,"m",i);
if (i < MetricsCache.MAX_RECS_PER_NAME_DEFAULT) {
assertNotNull("t0 is still there",cache.get("r",t0));
}
}
// One past the limit: the oldest entry has been evicted.
assertNull("t0 is gone",cache.get("r",t0));
}
InternalCallVerifier BooleanVerifier
/**
 * Make sure metrics tag has a sane hashCode impl: a record carrying a
 * null-valued tag must be cacheable, and the null value must round-trip
 * through getTag().
 */
@Test public void testNullTag(){
  MetricsCache cache=new MetricsCache();
  MetricsRecord mr=makeRecord("r",Arrays.asList(makeTag("t",null)),Arrays.asList(makeMetric("m",0),makeMetric("m1",1)));
  MetricsCache.Record cr=cache.update(mr);
  // assertNull instead of assertTrue(null == ...): same check, better
  // failure output, consistent with the sibling cache tests.
  assertNull("t value should be null",cr.getTag("t"));
}
InternalCallVerifier EqualityVerifier
// update() must merge metrics for an existing name+tags entry, create a new
// entry for a new tag-set, and only retain tags when includingTags is true.
@SuppressWarnings("deprecation") @Test public void testUpdate(){
MetricsCache cache=new MetricsCache();
MetricsRecord mr=makeRecord("r",Arrays.asList(makeTag("t","tv")),Arrays.asList(makeMetric("m",0),makeMetric("m1",1)));
MetricsCache.Record cr=cache.update(mr);
// The cache reads exactly name, tags and metrics from the record.
verify(mr).name();
verify(mr).tags();
verify(mr).metrics();
assertEquals("same record size",cr.metrics().size(),((Collection)mr.metrics()).size());
assertEquals("same metric value",0,cr.getMetric("m"));
// Same name + same tags: metrics are merged -- m overwritten, m1 kept,
// m2 added.
MetricsRecord mr2=makeRecord("r",Arrays.asList(makeTag("t","tv")),Arrays.asList(makeMetric("m",2),makeMetric("m2",42)));
cr=cache.update(mr2);
assertEquals("contains 3 metric",3,cr.metrics().size());
checkMetricValue("updated metric value",cr,"m",2);
checkMetricValue("old metric value",cr,"m1",1);
checkMetricValue("new metric value",cr,"m2",42);
// Different tag value: a fresh entry, independent of the first.
MetricsRecord mr3=makeRecord("r",Arrays.asList(makeTag("t","tv3")),Arrays.asList(makeMetric("m3",3)));
cr=cache.update(mr3);
assertEquals("contains 1 metric",1,cr.metrics().size());
checkMetricValue("updated metric value",cr,"m3",3);
// Default update() does not retain tags in the record ...
assertEquals("no tags",0,cr.tags().size());
// ... but update(record, true) does.
cr=cache.update(mr3,true);
assertEquals("Got 1 tag",1,cr.tags().size());
assertEquals("Tag value","tv3",cr.getTag("t"));
checkMetricValue("Metric value",cr,"m3",3);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Check that counts and quantile estimates are correctly reset after a call
 * to {@link SampleQuantiles#clear()}.
 */
@Test public void testClear() throws IOException {
  for (int i=0; i < 1000; i++) {
    estimator.insert(i);
  }
  estimator.clear();
  // JUnit convention is assertEquals(expected, actual); the original had
  // the arguments reversed, which garbles failure messages.
  assertEquals(0,estimator.getCount());
  assertEquals(0,estimator.getSampleCount());
  // No data after clear(): snapshot() must report nothing.
  assertNull(estimator.snapshot());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Check that the counts of the number of items in the window and sample are
 * incremented correctly as items are added.
 */
@Test public void testCount() throws IOException {
  // Fresh estimator: no items, no samples, no snapshot.
  // (Arguments ordered expected-first per JUnit convention; the original
  // had them reversed.)
  assertEquals(0,estimator.getCount());
  assertEquals(0,estimator.getSampleCount());
  assertNull(estimator.snapshot());
  estimator.insert(1337);
  assertEquals(1,estimator.getCount());
  // snapshot() folds buffered items into the sample.
  estimator.snapshot();
  assertEquals(1,estimator.getSampleCount());
  // With a single item, every quantile estimate is that item.
  assertEquals("50.00 %ile +/- 5.00%: 1337\n" + "75.00 %ile +/- 2.50%: 1337\n" + "90.00 %ile +/- 1.00%: 1337\n"+ "95.00 %ile +/- 0.50%: 1337\n"+ "99.00 %ile +/- 0.10%: 1337",estimator.toString());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * Correctness test that checks that absolute error of the estimate is within
 * specified error bounds for some randomly permuted streams of items.
 */
@Test public void testQuantileError() throws IOException {
final int count=100000;
// Fixed seed keeps the shuffles (and hence the test) deterministic.
Random r=new Random(0xDEADDEAD);
Long[] values=new Long[count];
for (int i=0; i < count; i++) {
values[i]=(long)(i + 1);
}
// Ten independent random permutations of 1..count.
for (int i=0; i < 10; i++) {
System.out.println("Starting run " + i);
Collections.shuffle(Arrays.asList(values),r);
estimator.clear();
for (int j=0; j < count; j++) {
estimator.insert(values[j]);
}
Map snapshot;
snapshot=estimator.snapshot();
// For 1..count, the true q-quantile is q*count; the estimate must lie
// within the quantile's configured error bound (also scaled by count).
for ( Quantile q : quantiles) {
long actual=(long)(q.quantile * count);
long error=(long)(q.error * count);
long estimate=snapshot.get(q);
System.out.println(String.format("Expected %d with error %d, estimated %d",actual,error,estimate));
assertTrue(estimate <= actual + error);
assertTrue(estimate >= actual - error);
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Some simple use cases: an empty stat reports zeros (with sentinel min/max),
 * a single sample sets mean/min/max to itself with zero variance, three
 * samples {1,2,3} give mean 2 and variance 1, and reset() restores the
 * empty state.
 */
@Test public void testSimple(){
SampleStat stat=new SampleStat();
// Empty: all aggregates zero; min/max hold their sentinel defaults.
assertEquals("num samples",0,stat.numSamples());
assertEquals("mean",0.0,stat.mean(),EPSILON);
assertEquals("variance",0.0,stat.variance(),EPSILON);
assertEquals("stddev",0.0,stat.stddev(),EPSILON);
assertEquals("min",SampleStat.MinMax.DEFAULT_MIN_VALUE,stat.min(),EPSILON);
assertEquals("max",SampleStat.MinMax.DEFAULT_MAX_VALUE,stat.max(),EPSILON);
stat.add(3);
// One sample: mean == min == max == the sample, zero spread.
assertEquals("num samples",1L,stat.numSamples());
assertEquals("mean",3.0,stat.mean(),EPSILON);
assertEquals("variance",0.0,stat.variance(),EPSILON);
assertEquals("stddev",0.0,stat.stddev(),EPSILON);
assertEquals("min",3.0,stat.min(),EPSILON);
assertEquals("max",3.0,stat.max(),EPSILON);
stat.add(2).add(1);
// Samples {1,2,3}: mean 2, (sample) variance 1, stddev 1.
assertEquals("num samples",3L,stat.numSamples());
assertEquals("mean",2.0,stat.mean(),EPSILON);
assertEquals("variance",1.0,stat.variance(),EPSILON);
assertEquals("stddev",1.0,stat.stddev(),EPSILON);
assertEquals("min",1.0,stat.min(),EPSILON);
assertEquals("max",3.0,stat.max(),EPSILON);
stat.reset();
// reset() restores the pristine empty state.
assertEquals("num samples",0,stat.numSamples());
assertEquals("mean",0.0,stat.mean(),EPSILON);
assertEquals("variance",0.0,stat.variance(),EPSILON);
assertEquals("stddev",0.0,stat.stddev(),EPSILON);
assertEquals("min",SampleStat.MinMax.DEFAULT_MIN_VALUE,stat.min(),EPSILON);
assertEquals("max",SampleStat.MinMax.DEFAULT_MAX_VALUE,stat.max(),EPSILON);
}
InternalCallVerifier EqualityVerifier
// createPrincipal with multiple principal names must write all of them into
// the generated keytab file.
@Test public void testKeytabGen() throws Exception {
MiniKdc kdc=getKdc();
File workDir=getWorkDir();
kdc.createPrincipal(new File(workDir,"keytab"),"foo/bar","bar/foo");
Keytab kt=Keytab.read(new File(workDir,"keytab"));
Set principals=new HashSet();
for ( KeytabEntry entry : kt.getEntries()) {
principals.add(entry.getPrincipalName());
}
// NOTE(review): the expectation uses a backslash ("foo\bar@REALM") where the
// principal was created as "foo/bar" -- presumably how the keytab library
// renders the name; confirm against MiniKdc/Keytab before changing.
Assert.assertEquals(new HashSet(Arrays.asList("foo\\bar@" + kdc.getRealm(),"bar\\foo@" + kdc.getRealm())),principals);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A principal created against the MiniKdc must be able to log in with both
// the client and the server JAAS configurations; in each case the resulting
// Subject carries exactly one KerberosPrincipal named principal@REALM.
@Test public void testKerberosLogin() throws Exception {
MiniKdc kdc=getKdc();
File workDir=getWorkDir();
LoginContext loginContext=null;
try {
String principal="foo";
File keytab=new File(workDir,"foo.keytab");
kdc.createPrincipal(keytab,principal);
Set principals=new HashSet();
principals.add(new KerberosPrincipal(principal));
// Client-side login.
Subject subject=new Subject(false,principals,new HashSet(),new HashSet());
loginContext=new LoginContext("",subject,null,KerberosConfiguration.createClientConfig(principal,keytab));
loginContext.login();
subject=loginContext.getSubject();
Assert.assertEquals(1,subject.getPrincipals().size());
Assert.assertEquals(KerberosPrincipal.class,subject.getPrincipals().iterator().next().getClass());
Assert.assertEquals(principal + "@" + kdc.getRealm(),subject.getPrincipals().iterator().next().getName());
loginContext.logout();
// Server-side login with a fresh Subject.
subject=new Subject(false,principals,new HashSet(),new HashSet());
loginContext=new LoginContext("",subject,null,KerberosConfiguration.createServerConfig(principal,keytab));
loginContext.login();
subject=loginContext.getSubject();
Assert.assertEquals(1,subject.getPrincipals().size());
Assert.assertEquals(KerberosPrincipal.class,subject.getPrincipals().iterator().next().getClass());
Assert.assertEquals(principal + "@" + kdc.getRealm(),subject.getPrincipals().iterator().next().getName());
loginContext.logout();
}
  finally {
// Best-effort logout in case an assertion fired mid-test.
if (loginContext != null) {
loginContext.logout();
}
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier
@Test public void testContains() throws Exception {
  // Every registered datanode is contained; an unknown node is not.
  DatanodeDescriptor outsider=DFSTestUtil.getDatanodeDescriptor("8.8.8.8","/d2/r4");
  for ( DatanodeDescriptor node : dataNodes) {
    assertTrue(cluster.contains(node));
  }
  assertFalse(cluster.contains(outsider));
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRemove() throws Exception {
  // Removing every node empties the topology; re-add them afterwards so
  // later tests still see the full cluster.
  for ( DatanodeDescriptor node : dataNodes) {
    cluster.remove(node);
  }
  for ( DatanodeDescriptor node : dataNodes) {
    assertFalse(cluster.contains(node));
  }
  assertEquals(0,cluster.getNumOfLeaves());
  for ( DatanodeDescriptor node : dataNodes) {
    cluster.add(node);
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Start two datanodes whose racks ("/a/b" vs "/c") form an invalid topology
// (different depths); only one DN may come up. After remapping the rejected
// host to the valid rack and restarting it, both DNs must join with the same
// network location -- i.e. the invalid mapping must not have been cached.
@Test(timeout=180000) public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
Configuration conf=new HdfsConfiguration();
MiniDFSCluster cluster=null;
try {
String racks[]={"/a/b","/c"};
String hosts[]={"foo1.example.com","foo2.example.com"};
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build();
cluster.waitActive();
NamenodeProtocols nn=cluster.getNameNodeRpc();
Assert.assertNotNull(nn);
DatanodeInfo[] info;
// Poll until exactly one DN is live; both being live at any point means
// the invalid topology was wrongly accepted.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
Assert.assertFalse(info.length == 2);
if (info.length == 1) {
break;
}
Thread.sleep(1000);
}
// Identify which host survived, remap the rejected host onto the valid
// rack, and restart it.
int validIdx=info[0].getHostName().equals(hosts[0]) ? 0 : 1;
int invalidIdx=validIdx == 1 ? 0 : 1;
StaticMapping.addNodeToRack(hosts[invalidIdx],racks[validIdx]);
LOG.info("datanode " + validIdx + " came up with network location "+ info[0].getNetworkLocation());
cluster.restartDataNode(invalidIdx);
Thread.sleep(5000);
// Poll until both DNs are live, logging intermediate states.
while (true) {
info=nn.getDatanodeReport(DatanodeReportType.LIVE);
if (info.length == 2) {
break;
}
if (info.length == 0) {
LOG.info("got no valid DNs");
}
 else if (info.length == 1) {
LOG.info("got one valid DN: " + info[0].getHostName() + " (at "+ info[0].getNetworkLocation()+ ")");
}
Thread.sleep(1000);
}
// Both DNs now share the (valid) rack.
Assert.assertEquals(info[0].getNetworkLocation(),info[1].getNetworkLocation());
}
  finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Adding a node whose rack depth differs from existing nodes ("/d1" vs
// "/d1/r1") must be rejected with InvalidTopologyException.
@Test public void testCreateInvalidTopology() throws Exception {
NetworkTopology invalCluster=new NetworkTopology();
DatanodeDescriptor invalDataNodes[]=new DatanodeDescriptor[]{DFSTestUtil.getDatanodeDescriptor("1.1.1.1","/d1/r1"),DFSTestUtil.getDatanodeDescriptor("2.2.2.2","/d1/r1"),DFSTestUtil.getDatanodeDescriptor("3.3.3.3","/d1")};
invalCluster.add(invalDataNodes[0]);
invalCluster.add(invalDataNodes[1]);
try {
// Third node sits at rack level while the others are leaves under racks.
invalCluster.add(invalDataNodes[2]);
fail("expected InvalidTopologyException");
}
 catch ( NetworkTopology.InvalidTopologyException e) {
assertTrue(e.getMessage().startsWith("Failed to add "));
assertTrue(e.getMessage().contains("You cannot have a rack and a non-rack node at the same " + "level of the network topology."));
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testRacks() throws Exception {
  // The fixture topology contains six racks; adjacent dataNodes pairs are
  // on the same rack except across the rack boundaries checked below.
  // (assertEquals arguments reordered to the JUnit expected-first
  // convention, matching the sibling node-group tests.)
  assertEquals(6,cluster.getNumOfRacks());
  assertTrue(cluster.isOnSameRack(dataNodes[0],dataNodes[1]));
  assertFalse(cluster.isOnSameRack(dataNodes[1],dataNodes[2]));
  assertTrue(cluster.isOnSameRack(dataNodes[2],dataNodes[3]));
  assertTrue(cluster.isOnSameRack(dataNodes[3],dataNodes[4]));
  assertFalse(cluster.isOnSameRack(dataNodes[4],dataNodes[5]));
  assertTrue(cluster.isOnSameRack(dataNodes[5],dataNodes[6]));
}
InternalCallVerifier EqualityVerifier
@Test public void testGetDistance() throws Exception {
  // Distance is 0 to self and grows by 2 for each level separating the
  // nodes. (assertEquals arguments reordered to the JUnit expected-first
  // convention, matching the node-group variant of this test.)
  assertEquals(0,cluster.getDistance(dataNodes[0],dataNodes[0]));
  assertEquals(2,cluster.getDistance(dataNodes[0],dataNodes[1]));
  assertEquals(4,cluster.getDistance(dataNodes[0],dataNodes[3]));
  assertEquals(6,cluster.getDistance(dataNodes[0],dataNodes[6]));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testNodeGroups() throws Exception {
  // Three racks; only the pairs (0,1) and (3,4) share a node group.
  assertEquals(3,cluster.getNumOfRacks());
  assertTrue("0 and 1 share a node group",cluster.isOnSameNodeGroup(dataNodes[0],dataNodes[1]));
  assertFalse("1 and 2 are in different node groups",cluster.isOnSameNodeGroup(dataNodes[1],dataNodes[2]));
  assertFalse("2 and 3 are in different node groups",cluster.isOnSameNodeGroup(dataNodes[2],dataNodes[3]));
  assertTrue("3 and 4 share a node group",cluster.isOnSameNodeGroup(dataNodes[3],dataNodes[4]));
  assertFalse("4 and 5 are in different node groups",cluster.isOnSameNodeGroup(dataNodes[4],dataNodes[5]));
  assertFalse("5 and 6 are in different node groups",cluster.isOnSameNodeGroup(dataNodes[5],dataNodes[6]));
  assertFalse("6 and 7 are in different node groups",cluster.isOnSameNodeGroup(dataNodes[6],dataNodes[7]));
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies distances from dataNodes[0] to targets at increasing topology
 * separation.
 */
@Test public void testGetDistance() throws Exception {
  int[] targets={0,1,2,3,6};
  int[] expectedDistances={0,2,4,6,8};
  for (int i=0; i < targets.length; i++) {
    assertEquals(expectedDistances[i],
        cluster.getDistance(dataNodes[0],dataNodes[targets[i]]));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies the rack count and the same-rack relation between each pair of
 * consecutive datanodes.
 */
@Test public void testRacks() throws Exception {
  assertEquals(3,cluster.getNumOfRacks());
  // expectedSameRack[i] states whether dataNodes[i] and dataNodes[i+1]
  // share a rack.
  boolean[] expectedSameRack={true,true,false,true,true,false,true};
  for (int i=0; i < expectedSameRack.length; i++) {
    boolean sameRack=cluster.isOnSameRack(dataNodes[i],dataNodes[i + 1]);
    assertEquals("rack relation between nodes " + i + " and " + (i + 1),
        expectedSameRack[i],sameRack);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A configured script filename makes the mapping multi-switch; replacing
 * the configuration with an empty one reverts it to single-switch.
 */
@Test public void testFilenameMeansMultiSwitch() throws Throwable {
  Configuration configuration=new Configuration();
  configuration.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  ScriptBasedMapping scriptMapping=createMapping(configuration);
  assertFalse("Expected to be multi switch",scriptMapping.isSingleSwitch());
  // An empty configuration carries no script filename.
  scriptMapping.setConf(new Configuration());
  assertTrue("Expected to be single switch",scriptMapping.isSingleSwitch());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Without a script filename the mapping reports itself as single-switch,
 * both directly and through the AbstractDNSToSwitchMapping helper.
 */
@Test public void testNoFilenameMeansSingleSwitch() throws Throwable {
  ScriptBasedMapping scriptMapping=createMapping(new Configuration());
  assertTrue("Expected to be single switch",scriptMapping.isSingleSwitch());
  assertTrue("Expected to be single switch",
      AbstractDNSToSwitchMapping.isMappingSingleSwitch(scriptMapping));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * With the script argument count configured below the allowed minimum,
 * resolve() must report failure by returning null.
 */
@Test public void testNoArgsMeansNoResult(){
  Configuration conf=new Configuration();
  // Arg count below MIN_ALLOWABLE_ARGS makes the script mapping unusable.
  conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
  // (The original set this key twice with the same value; one suffices.)
  conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  ScriptBasedMapping mapping=createMapping(conf);
  List<String> names=new ArrayList<String>();
  names.add("some.machine.name");
  names.add("other.machine.name");
  List<String> result=mapping.resolve(names);
  assertNull("Expected an empty list",result);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * A mapping created from an empty configuration has no script configured
 * and is therefore single-switch.
 */
@Test public void testNoFilenameMeansSingleSwitch() throws Throwable {
  Configuration emptyConf=new Configuration();
  ScriptBasedMapping mapping=createMapping(emptyConf);
  assertTrue("Expected to be single switch",mapping.isSingleSwitch());
  assertTrue("Expected to be single switch",
      AbstractDNSToSwitchMapping.isMappingSingleSwitch(mapping));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * With the dependency-aware script mapping, both resolve() and
 * getDependency() must yield null results when the mapping is unusable.
 */
@Test public void testNoArgsMeansNoResult(){
Configuration conf=new Configuration();
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename-1");
conf.set(ScriptBasedMappingWithDependency.DEPENDENCY_SCRIPT_FILENAME_KEY,"any-filename-2");
// NOTE(review): this overrides the MIN_ALLOWABLE_ARGS - 1 value set above;
// confirm whether the earlier setInt is still intended.
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,10);
ScriptBasedMappingWithDependency mapping=createMapping(conf);
List names=new ArrayList();
names.add("some.machine.name");
names.add("other.machine.name");
List result=mapping.resolve(names);
assertNull("Expected an empty list for resolve",result);
// The dependency lookup must fail the same way.
result=mapping.getDependency("some.machine.name");
assertNull("Expected an empty list for getDependency",result);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Setting a script filename switches the mapping into multi-switch mode;
 * an empty replacement configuration switches it back to single-switch.
 */
@Test public void testFilenameMeansMultiSwitch() throws Throwable {
  Configuration withScript=new Configuration();
  withScript.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY,"any-filename");
  ScriptBasedMapping mapping=createMapping(withScript);
  assertFalse("Expected to be multi switch",mapping.isSingleSwitch());
  mapping.setConf(new Configuration());
  assertTrue("Expected to be single switch",mapping.isSingleSwitch());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verify that a configuration string builds a topology: hosts named in
 * KEY_HADOOP_CONFIGURED_NODE_MAPPING resolve to their configured racks,
 * unknown hosts fall back to the default rack, and the switch map only
 * records the configured hosts.
 */
@Test public void testReadNodesFromConfig() throws Throwable {
StaticMapping mapping=newInstance();
Configuration conf=new Configuration();
conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING,"n1=/r1,n2=/r2");
mapping.setConf(conf);
assertSingleSwitch(mapping);
List l1=new ArrayList(3);
l1.add("n1");
l1.add("unknown");
l1.add("n2");
List resolved=mapping.resolve(l1);
assertEquals(3,resolved.size());
assertEquals("/r1",resolved.get(0));
// The host absent from the mapping falls back to the default rack.
assertEquals(NetworkTopology.DEFAULT_RACK,resolved.get(1));
assertEquals("/r2",resolved.get(2));
Map switchMap=mapping.getSwitchMap();
String topology=mapping.dumpTopology();
LOG.info(topology);
// Only the two configured nodes appear; "unknown" is not recorded.
assertEquals(topology,2,switchMap.size());
assertEquals(topology,"/r1",switchMap.get("n1"));
assertNull(topology,switchMap.get("unknown"));
}
InternalCallVerifier EqualityVerifier
/**
 * After statically registering n1 on /r1, resolving n1 plus an unregistered
 * host yields /r1 and the default rack, and the switch map holds only n1.
 */
@Test public void testAddResolveNodes() throws Throwable {
  StaticMapping staticMapping=newInstance();
  StaticMapping.addNodeToRack("n1","/r1");
  List hosts=createQueryList();
  List racks=staticMapping.resolve(hosts);
  assertEquals(2,racks.size());
  assertEquals("/r1",racks.get(0));
  assertEquals(NetworkTopology.DEFAULT_RACK,racks.get(1));
  Map switchMap=staticMapping.getSwitchMap();
  String dump=staticMapping.dumpTopology();
  LOG.info(dump);
  // Only the explicitly registered node appears in the switch map.
  assertEquals(dump,1,switchMap.size());
  assertEquals(dump,"/r1",switchMap.get("n1"));
}
InternalCallVerifier BooleanVerifier
/**
 * The cached wrapper's toString() must surface the inner script-based
 * mapping's configured script name.
 * @throws Throwable on any problem
 */
@Test public void testCachingRelaysStringOperations() throws Throwable {
  Configuration conf=new Configuration();
  String scriptname="mappingscript.sh";
  conf.set(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,scriptname);
  ScriptBasedMapping inner=new ScriptBasedMapping(conf);
  assertTrue("Did not find " + scriptname + " in "+ inner,
      inner.toString().contains(scriptname));
  CachedDNSToSwitchMapping cached=new CachedDNSToSwitchMapping(inner);
  assertTrue("Did not find " + scriptname + " in "+ cached,
      cached.toString().contains(scriptname));
}
InternalCallVerifier BooleanVerifier
/**
 * With no script configured, both the script mapping and its cached wrapper
 * must report ScriptBasedMapping.NO_SCRIPT from toString().
 * @throws Throwable on any problem
 */
@Test public void testCachingRelaysStringOperationsToNullScript() throws Throwable {
  ScriptBasedMapping inner=new ScriptBasedMapping(new Configuration());
  assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT + " in "+ inner,
      inner.toString().contains(ScriptBasedMapping.NO_SCRIPT));
  CachedDNSToSwitchMapping cached=new CachedDNSToSwitchMapping(inner);
  assertTrue("Did not find " + ScriptBasedMapping.NO_SCRIPT + " in "+ cached,
      cached.toString().contains(ScriptBasedMapping.NO_SCRIPT));
}
InternalCallVerifier EqualityVerifier
/**
 * A missing topology table file must not break resolution: every host
 * falls back to NetworkTopology.DEFAULT_RACK.
 */
@Test public void testFileDoesNotExist(){
  TableMapping mapping=new TableMapping();
  Configuration conf=new Configuration();
  conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,"/this/file/does/not/exist");
  mapping.setConf(conf);
  List<String> names=new ArrayList<String>();
  names.add("a.b.c");
  names.add("1.2.3.4");
  List<String> result=mapping.resolve(names);
  assertEquals(names.size(),result.size());
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(NetworkTopology.DEFAULT_RACK,result.get(0));
  assertEquals(NetworkTopology.DEFAULT_RACK,result.get(1));
}
InternalCallVerifier EqualityVerifier
/**
 * Hosts listed in the topology table file resolve to their mapped racks,
 * whether the host and rack are separated by a space or a tab.
 */
@Test public void testResolve() throws IOException {
  File tableFile=File.createTempFile(getClass().getSimpleName() + ".testResolve",".txt");
  tableFile.deleteOnExit();
  Files.write("a.b.c /rack1\n" + "1.2.3.4\t/rack2\n",tableFile,Charsets.UTF_8);
  TableMapping tableMapping=new TableMapping();
  Configuration conf=new Configuration();
  conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,tableFile.getCanonicalPath());
  tableMapping.setConf(conf);
  List hosts=new ArrayList();
  hosts.add("a.b.c");
  hosts.add("1.2.3.4");
  List racks=tableMapping.resolve(hosts);
  assertEquals(hosts.size(),racks.size());
  assertEquals("/rack1",racks.get(0));
  assertEquals("/rack2",racks.get(1));
}
InternalCallVerifier EqualityVerifier
/**
 * Once loaded, the table is cached: repointing the configuration at an
 * unusable file afterwards does not change resolution results.
 */
@Test public void testTableCaching() throws IOException {
  File tableFile=File.createTempFile(getClass().getSimpleName() + ".testTableCaching",".txt");
  Files.write("a.b.c /rack1\n" + "1.2.3.4\t/rack2\n",tableFile,Charsets.UTF_8);
  tableFile.deleteOnExit();
  TableMapping tableMapping=new TableMapping();
  Configuration conf=new Configuration();
  conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,tableFile.getCanonicalPath());
  tableMapping.setConf(conf);
  List hosts=new ArrayList();
  hosts.add("a.b.c");
  hosts.add("1.2.3.4");
  List firstPass=tableMapping.resolve(hosts);
  assertEquals(hosts.size(),firstPass.size());
  assertEquals("/rack1",firstPass.get(0));
  assertEquals("/rack2",firstPass.get(1));
  // The mapping must keep serving the already-loaded table even though the
  // file key now points at something unusable.
  conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,"some bad value for a file");
  List secondPass=tableMapping.resolve(hosts);
  assertEquals(firstPass,secondPass);
}
InternalCallVerifier EqualityVerifier
/**
 * After reloadCachedMappings() the previously loaded table must be
 * forgotten: with the table file emptied, the same hosts then resolve to
 * the default rack.
 */
@Test public void testClearingCachedMappings() throws IOException {
File mapFile=File.createTempFile(getClass().getSimpleName() + ".testClearingCachedMappings",".txt");
Files.write("a.b.c /rack1\n" + "1.2.3.4\t/rack2\n",mapFile,Charsets.UTF_8);
mapFile.deleteOnExit();
TableMapping mapping=new TableMapping();
Configuration conf=new Configuration();
conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,mapFile.getCanonicalPath());
mapping.setConf(conf);
List names=new ArrayList();
names.add("a.b.c");
names.add("1.2.3.4");
List result=mapping.resolve(names);
assertEquals(names.size(),result.size());
assertEquals("/rack1",result.get(0));
assertEquals("/rack2",result.get(1));
// Truncate the table file, then force the mapping to drop its cache.
Files.write("",mapFile,Charsets.UTF_8);
mapping.reloadCachedMappings();
names=new ArrayList();
names.add("a.b.c");
names.add("1.2.3.4");
// With the file now empty, resolution falls back to the default rack.
result=mapping.resolve(names);
assertEquals(names.size(),result.size());
assertEquals(NetworkTopology.DEFAULT_RACK,result.get(0));
assertEquals(NetworkTopology.DEFAULT_RACK,result.get(1));
}
InternalCallVerifier EqualityVerifier
/**
 * With no table file configured at all, every host resolves to the default
 * rack.
 */
@Test public void testNoFile(){
  TableMapping tableMapping=new TableMapping();
  tableMapping.setConf(new Configuration());
  List hosts=new ArrayList();
  hosts.add("a.b.c");
  hosts.add("1.2.3.4");
  List racks=tableMapping.resolve(hosts);
  assertEquals(hosts.size(),racks.size());
  assertEquals(NetworkTopology.DEFAULT_RACK,racks.get(0));
  assertEquals(NetworkTopology.DEFAULT_RACK,racks.get(1));
}
InternalCallVerifier EqualityVerifier
/**
 * A table file with unparseable contents must degrade gracefully: all
 * hosts resolve to NetworkTopology.DEFAULT_RACK.
 */
@Test(timeout=60000) public void testBadFile() throws IOException {
  File mapFile=File.createTempFile(getClass().getSimpleName() + ".testBadFile",".txt");
  Files.write("bad contents",mapFile,Charsets.UTF_8);
  mapFile.deleteOnExit();
  TableMapping mapping=new TableMapping();
  Configuration conf=new Configuration();
  conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,mapFile.getCanonicalPath());
  mapping.setConf(conf);
  List<String> names=new ArrayList<String>();
  names.add("a.b.c");
  names.add("1.2.3.4");
  List<String> result=mapping.resolve(names);
  assertEquals(names.size(),result.size());
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(NetworkTopology.DEFAULT_RACK,result.get(0));
  assertEquals(NetworkTopology.DEFAULT_RACK,result.get(1));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test setting some server options: the receive-buffer-size and
 * receive-timeout attributes of a listening DomainSocket are settable and
 * readable back, and accept() honours the configured timeout.
 * @throws IOException
 */
@Test(timeout=180000) public void testServerOptions() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"test_sock_server_options").getAbsolutePath();
DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
try {
// Halve the receive buffer and confirm the new size is read back.
int bufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
int newBufSize=bufSize / 2;
serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE,newBufSize);
int nextBufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
Assert.assertEquals(newBufSize,nextBufSize);
// Set a 1-second receive timeout and confirm it is read back.
int newTimeout=1000;
serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT,newTimeout);
int nextTimeout=serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT);
Assert.assertEquals(newTimeout,nextTimeout);
try {
// No client ever connects, so accept() must time out.
serv.accept();
Assert.fail("expected the accept() to time out and fail");
}
catch ( SocketTimeoutException e) {
GenericTestUtils.assertExceptionContains("accept(2) error: ",e);
}
}
finally {
serv.close();
Assert.assertFalse(serv.isOpen());
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier
/**
 * Test file descriptor passing: a server thread sends two open file
 * descriptors over a Unix domain socket alongside a message, and the
 * client verifies both the message bytes and the received streams.
 * @throws IOException
 */
@Test(timeout=180000) public void testFdPassing() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"test_sock").getAbsolutePath();
final byte clientMsg1[]=new byte[]{0x11,0x22,0x33,0x44,0x55,0x66};
final byte serverMsg1[]=new byte[]{0x31,0x30,0x32,0x34,0x31,0x33,0x44,0x1,0x1,0x1,0x1,0x1};
// Each thread deposits either a Throwable or a Success marker here; the
// main thread takes two results below.
final ArrayBlockingQueue threadResults=new ArrayBlockingQueue(2);
final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
final PassedFile passedFiles[]=new PassedFile[]{new PassedFile(1),new PassedFile(2)};
final FileDescriptor passedFds[]=new FileDescriptor[passedFiles.length];
for (int i=0; i < passedFiles.length; i++) {
passedFds[i]=passedFiles[i].getInputStream().getFD();
}
// Server side: read the client's message, then send the descriptors
// together with serverMsg1.
Thread serverThread=new Thread(){
public void run(){
DomainSocket conn=null;
try {
conn=serv.accept();
byte in1[]=new byte[clientMsg1.length];
InputStream connInputStream=conn.getInputStream();
IOUtils.readFully(connInputStream,in1,0,in1.length);
Assert.assertTrue(Arrays.equals(clientMsg1,in1));
DomainSocket domainConn=(DomainSocket)conn;
domainConn.sendFileDescriptors(passedFds,serverMsg1,0,serverMsg1.length);
conn.close();
}
catch ( Throwable e) {
// The Throwable is queued first so the main thread reports it.
threadResults.add(e);
Assert.fail(e.getMessage());
}
threadResults.add(new Success());
}
}
;
serverThread.start();
// Client side: send clientMsg1, then receive the descriptors and verify
// the server's message and the passed streams.
Thread clientThread=new Thread(){
public void run(){
try {
DomainSocket client=DomainSocket.connect(TEST_PATH);
OutputStream clientOutputStream=client.getOutputStream();
InputStream clientInputStream=client.getInputStream();
clientOutputStream.write(clientMsg1);
DomainSocket domainConn=(DomainSocket)client;
byte in1[]=new byte[serverMsg1.length];
FileInputStream recvFis[]=new FileInputStream[passedFds.length];
// Deliberately ask for one byte less than the message so the
// remainder is collected by the plain read below.
int r=domainConn.recvFileInputStreams(recvFis,in1,0,in1.length - 1);
Assert.assertTrue(r > 0);
IOUtils.readFully(clientInputStream,in1,r,in1.length - r);
Assert.assertTrue(Arrays.equals(serverMsg1,in1));
for (int i=0; i < passedFds.length; i++) {
Assert.assertNotNull(recvFis[i]);
passedFiles[i].checkInputStream(recvFis[i]);
}
for ( FileInputStream fis : recvFis) {
fis.close();
}
client.close();
}
catch ( Throwable e) {
// NOTE(review): on failure both the Throwable and a Success are
// queued; the loop below only consumes two results.
threadResults.add(e);
}
threadResults.add(new Success());
}
}
;
clientThread.start();
// Fail fast if either thread reported a Throwable instead of Success.
for (int i=0; i < 2; i++) {
Throwable t=threadResults.take();
if (!(t instanceof Success)) {
Assert.fail(t.getMessage() + ExceptionUtils.getStackTrace(t));
}
}
serverThread.join(120000);
clientThread.join(120000);
serv.close();
for ( PassedFile pf : passedFiles) {
pf.cleanup();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test that we get a read result of -1 on EOF: a server-side read on the
 * accepted connection returns -1 once the client closes its end without
 * writing anything.
 * @throws IOException
 */
@Test(timeout=180000) public void testSocketReadEof() throws Exception {
final String TEST_PATH=new File(sockDir.getDir(),"testSocketReadEof").getAbsolutePath();
final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH);
ExecutorService exeServ=Executors.newSingleThreadExecutor();
Callable callable=new Callable(){
public Void call(){
DomainSocket conn;
try {
conn=serv.accept();
}
catch ( IOException e) {
throw new RuntimeException("unexpected IOException",e);
}
byte buf[]=new byte[100];
for (int i=0; i < buf.length; i++) {
buf[i]=0;
}
try {
// The client never writes, so after it closes we must see EOF (-1).
Assert.assertEquals(-1,conn.getInputStream().read());
}
catch ( IOException e) {
throw new RuntimeException("unexpected IOException",e);
}
return null;
}
}
;
Future future=exeServ.submit(callable);
DomainSocket conn=DomainSocket.connect(serv.getPath());
// Give the server callable a moment to block in read() before closing.
Thread.sleep(50);
conn.close();
serv.close();
// Propagates any assertion or runtime failure from the server callable.
future.get(2,TimeUnit.MINUTES);
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises an export string containing two matchers separated by ';'
 * ("192.168.0.[0-9]+" with no flag and "[a-z]+.b.com rw"), verifying the
 * expected privilege for several address/hostname combinations and that a
 * cached privilege eventually expires to NONE.
 */
@Test public void testMultiMatchers() throws Exception {
// Roughly one second, expressed in nanoseconds.
long shortExpirationPeriod=1 * 1000 * 1000* 1000;
NfsExports matcher=new NfsExports(CacheSize,shortExpirationPeriod,"192.168.0.[0-9]+;[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname2));
Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,address1));
Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address2,hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address2,hostname2));
// Let the cached lookups pass their expiration period.
Thread.sleep(1000);
AccessPrivilege ap;
long startNanos=System.nanoTime();
// Poll for up to 5 seconds for the privilege to expire to NONE; the loop
// tolerates slow machines where expiry has not yet taken effect.
do {
ap=matcher.getAccessPrivilege(address2,address2);
if (ap == AccessPrivilege.NONE) {
break;
}
Thread.sleep(500);
}
while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE,ap);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An exact-address export with "rw" grants READ_WRITE to that address only.
 */
@Test public void testExactAddressRW(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,address1 + " rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,exports.getAccessPrivilege(address1,hostname1));
  // A different address must not receive write access.
  Assert.assertFalse(AccessPrivilege.READ_WRITE == exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * A hostname-regex export with no access flag defaults to read-only.
 */
@Test public void testRegexHostRO(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"[a-z]+.b.com");
  Assert.assertEquals(AccessPrivilege.READ_ONLY,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.READ_ONLY,exports.getAccessPrivilege(address1,hostname2));
}
InternalCallVerifier EqualityVerifier
/**
 * An IP-regex export with "rw": matching addresses get READ_WRITE, others
 * NONE.
 */
@Test public void testRegexIPRW(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.[0-9]+ rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * A prefix-length CIDR export with "rw": in-range addresses get READ_WRITE,
 * out-of-range NONE.
 */
@Test public void testCidrShortRW(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/22 rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * A netmask-form CIDR export with "rw": in-range addresses get READ_WRITE,
 * out-of-range NONE.
 */
@Test public void testCidrLongRW(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/255.255.252.0 rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * An exact-address export without flags defaults to READ_ONLY for that
 * address and NONE for any other.
 */
@Test public void testExactAddressRO(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,address1);
  Assert.assertEquals(AccessPrivilege.READ_ONLY,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * A hostname-regex export with "rw" grants READ_WRITE to matching hostnames.
 */
@Test public void testRegexHostRW(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"[a-z]+.b.com rw");
  Assert.assertEquals(AccessPrivilege.READ_WRITE,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.READ_WRITE,exports.getAccessPrivilege(address1,hostname2));
}
InternalCallVerifier EqualityVerifier
/**
 * An IP-regex export without flags defaults to READ_ONLY for matching
 * addresses and NONE otherwise.
 */
@Test public void testRegexIPRO(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.[0-9]+");
  Assert.assertEquals(AccessPrivilege.READ_ONLY,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * A netmask-form CIDR export without flags defaults to READ_ONLY in range
 * and NONE out of range.
 */
@Test public void testCidrLongRO(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/255.255.252.0");
  Assert.assertEquals(AccessPrivilege.READ_ONLY,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * A prefix-length CIDR export without flags defaults to READ_ONLY in range
 * and NONE out of range.
 */
@Test public void testCidrShortRO(){
  NfsExports exports=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/22");
  Assert.assertEquals(AccessPrivilege.READ_ONLY,exports.getAccessPrivilege(address1,hostname1));
  Assert.assertEquals(AccessPrivilege.NONE,exports.getAccessPrivilege(address2,hostname1));
}
InternalCallVerifier EqualityVerifier
/**
 * 1001 milliseconds splits into 1 second plus 1,000,000 nanoseconds.
 */
@Test public void testConstructor(){
  NfsTime time=new NfsTime(1001);
  Assert.assertEquals(1,time.getSeconds());
  Assert.assertEquals(1000000,time.getNseconds());
}
InternalCallVerifier EqualityVerifier
/**
 * An NfsTime must survive an XDR serialize/deserialize round trip
 * unchanged.
 */
@Test public void testSerializeDeserialize(){
  NfsTime original=new NfsTime(1001);
  XDR xdr=new XDR();
  original.serialize(xdr);
  NfsTime roundTripped=NfsTime.deserialize(xdr.asReadOnlyWrap());
  Assert.assertEquals(original,roundTripped);
}
InternalCallVerifier EqualityVerifier
/**
 * Serializes a handle built around file id 1024, deserializes it into a
 * fresh handle, and confirms the id survives the round trip.
 */
@Test public void testConstructor(){
  FileHandle handle=new FileHandle(1024);
  XDR xdr=new XDR();
  handle.serialize(xdr);
  // Expected value first per JUnit convention (was reversed).
  Assert.assertEquals(1024,handle.getFileId());
  FileHandle handle2=new FileHandle();
  handle2.deserialize(xdr.asReadOnlyWrap());
  // Bug fix: the original asserted on handle (always 1024), never
  // verifying the deserialized handle2.
  Assert.assertEquals("Failed: Assert 1024 is id ",1024,handle2.getFileId());
}
InternalCallVerifier EqualityVerifier
/**
 * updateMapInternal must honour the static override maps: uid 11501 is
 * remapped to 10 and gid 497 to 200, while ids without an override pass
 * through unchanged.
 */
@Test public void testStaticMapping() throws IOException {
Map uidStaticMap=new PassThroughMap();
Map gidStaticMap=new PassThroughMap();
// Static overrides: system uid 11501 -> 10, system gid 497 -> 200.
uidStaticMap.put(11501,10);
gidStaticMap.put(497,200);
BiMap uMap=HashBiMap.create();
BiMap gMap=HashBiMap.create();
// Shell pipelines that emit "name:id" lines, standing in for getent-style
// user/group listings.
String GET_ALL_USERS_CMD="echo \"atm:x:1000:1000:Aaron T. Myers,,,:/home/atm:/bin/bash\n" + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\"" + " | cut -d: -f1,3";
String GET_ALL_GROUPS_CMD="echo \"hdfs:*:11501:hrt_hdfs\n" + "mapred:x:497\n" + "mapred2:x:498\""+ " | cut -d: -f1,3";
IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",uidStaticMap);
IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",gidStaticMap);
// hdfs was remapped from 11501 to 10; atm kept its original id.
assertEquals("hdfs",uMap.get(10));
assertEquals(10,(int)uMap.inverse().get("hdfs"));
assertEquals("atm",uMap.get(1000));
assertEquals(1000,(int)uMap.inverse().get("atm"));
// The group hdfs had no override; mapred was remapped from 497 to 200.
assertEquals("hdfs",gMap.get(11501));
assertEquals(11501,(int)gMap.inverse().get("hdfs"));
assertEquals("mapred",gMap.get(200));
assertEquals(200,(int)gMap.inverse().get("mapred"));
assertEquals("mapred2",gMap.get(498));
assertEquals(498,(int)gMap.inverse().get("mapred2"));
}
InternalCallVerifier EqualityVerifier
/**
 * The user/group update interval uses the default when unset, is raised to
 * the minimum when configured as zero, and accepts larger configured
 * values.
 */
@Test public void testUserUpdateSetting() throws IOException {
  IdUserGroup iug=new IdUserGroup(new Configuration());
  // Expected value first per JUnit convention (was reversed).
  assertEquals(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT,iug.getTimeout());
  Configuration conf=new Configuration();
  conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,0);
  iug=new IdUserGroup(conf);
  // A zero interval is raised to the supported minimum.
  assertEquals(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN,iug.getTimeout());
  conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
  iug=new IdUserGroup(conf);
  assertEquals(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2,iug.getTimeout());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Ids beyond Integer.MAX_VALUE (e.g. nfsnobody's 4294967294) must wrap into
 * the signed int range: 4294967294 -> -2, 4294967295 -> -1, 2147483648 ->
 * Integer.MIN_VALUE.
 */
@Test public void testIdOutOfIntegerRange() throws IOException {
  String GET_ALL_USERS_CMD="echo \"" + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n" + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"+ "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"+ "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"+ "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""+ " | cut -d: -f1,3";
  String GET_ALL_GROUPS_CMD="echo \"" + "hdfs:*:11501:hrt_hdfs\n" + "rpcuser:*:29:\n"+ "nfsnobody:*:4294967294:\n"+ "nfsnobody1:*:4294967295:\n"+ "maxint:*:2147483647:\n"+ "minint:*:2147483648:\n"+ "mapred3:x:498\""+ " | cut -d: -f1,3";
  BiMap uMap=HashBiMap.create();
  BiMap gMap=HashBiMap.create();
  IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  // assertEquals gives a diagnostic size on failure, unlike assertTrue.
  assertEquals(7,uMap.size());
  assertEquals("nfsnobody",uMap.get(-2));
  assertEquals("nfsnobody1",uMap.get(-1));
  assertEquals("maxint",uMap.get(2147483647));
  assertEquals("minint",uMap.get(-2147483648));
  assertEquals("archivebackup",uMap.get(1031));
  assertEquals("hdfs",uMap.get(11501));
  assertEquals("daemon",uMap.get(2));
  IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  assertEquals(7,gMap.size());
  assertEquals("hdfs",gMap.get(11501));
  assertEquals("rpcuser",gMap.get(29));
  assertEquals("nfsnobody",gMap.get(-2));
  assertEquals("nfsnobody1",gMap.get(-1));
  assertEquals("maxint",gMap.get(2147483647));
  assertEquals("minint",gMap.get(-2147483648));
  assertEquals("mapred3",gMap.get(498));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Duplicate names and duplicate ids in the system listings must not break
 * the bimaps: only one binding per name and per id survives.
 */
@Test public void testDuplicates() throws IOException {
  String GET_ALL_USERS_CMD="echo \"root:x:0:0:root:/root:/bin/bash\n" + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n" + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "bin:x:2:2:bin:/bin:/bin/sh\n"+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""+ " | cut -d: -f1,3";
  String GET_ALL_GROUPS_CMD="echo \"hdfs:*:11501:hrt_hdfs\n" + "mapred:x:497\n" + "mapred2:x:497\n"+ "mapred:x:498\n"+ "mapred3:x:498\""+ " | cut -d: -f1,3";
  BiMap uMap=HashBiMap.create();
  BiMap gMap=HashBiMap.create();
  IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  assertEquals(5,uMap.size());
  assertEquals("root",uMap.get(0));
  assertEquals("hdfs",uMap.get(11501));
  assertEquals("hdfs2",uMap.get(11502));
  assertEquals("bin",uMap.get(2));
  assertEquals("daemon",uMap.get(1));
  IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",EMPTY_PASS_THROUGH_MAP);
  // assertEquals gives a diagnostic size on failure, unlike assertTrue.
  assertEquals(3,gMap.size());
  assertEquals("hdfs",gMap.get(11501));
  assertEquals("mapred",gMap.get(497));
  assertEquals("mapred3",gMap.get(498));
}
InternalCallVerifier EqualityVerifier
/**
 * First request (a getport mount call padded with a 2MB opaque body) must
 * produce no recorded response bytes; a subsequent portmap request must be
 * answered with a payload the same size as the request body.
 */
@Test public void testUnprivilegedPort(){
// Server started by the test helper; false selects the unprivileged-port
// behaviour under test (helper definition not visible here).
int serverPort=startRpcServer(false);
XDR xdrOut=createGetportMount();
int bufsize=2 * 1024 * 1024;
byte[] buffer=new byte[bufsize];
xdrOut.writeFixedOpaque(buffer);
testRequest(xdrOut,serverPort);
// No response recorded for the first request.
assertEquals(0,resultSize);
xdrOut=new XDR();
createPortmapXDRheader(xdrOut,0);
int headerSize=xdrOut.size();
buffer=new byte[bufsize];
xdrOut.writeFixedOpaque(buffer);
int requestSize=xdrOut.size() - headerSize;
testRequest(xdrOut,serverPort);
// The recorded result size matches the request body size.
assertEquals(requestSize,resultSize);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A non-final fragment followed by a final fragment must be buffered and
 * then delivered as one combined channel buffer holding both payloads.
 */
@Test public void testMultipleFrames(){
  RpcFrameDecoder decoder=new RpcFrameDecoder();
  // Fragment 1: last-fragment bit clear, declared payload of 10 bytes.
  byte[] fragment1=new byte[4 + 10];
  fragment1[0]=0;
  fragment1[1]=0;
  fragment1[2]=0;
  fragment1[3]=(byte)10;
  assertFalse(XDR.isLastFragment(fragment1));
  // assertEquals/assertNull below replace opaque assertTrue(x == y) checks.
  assertEquals(10,XDR.fragmentSize(fragment1));
  ByteBuffer buffer=ByteBuffer.allocate(4 + 10);
  buffer.put(fragment1);
  buffer.flip();
  ChannelBuffer buf=new ByteBufferBackedChannelBuffer(buffer);
  ChannelBuffer channelBuffer=(ChannelBuffer)decoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),buf);
  // Not the last fragment yet, so nothing is emitted.
  assertNull(channelBuffer);
  // Fragment 2: last-fragment bit set, another 10 payload bytes.
  byte[] fragment2=new byte[4 + 10];
  fragment2[0]=(byte)(1 << 7);
  fragment2[1]=0;
  fragment2[2]=0;
  fragment2[3]=(byte)10;
  assertTrue(XDR.isLastFragment(fragment2));
  assertEquals(10,XDR.fragmentSize(fragment2));
  buffer=ByteBuffer.allocate(4 + 10);
  buffer.put(fragment2);
  buffer.flip();
  buf=new ByteBufferBackedChannelBuffer(buffer);
  channelBuffer=(ChannelBuffer)decoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),buf);
  // Both fragments' payloads are now delivered together.
  assertNotNull(channelBuffer);
  assertEquals(20,channelBuffer.readableBytes());
}
InternalCallVerifier EqualityVerifier
/**
 * A large framed request sent to the RPC server produces a recorded result
 * whose size matches the request payload size.
 */
@Test public void testFrames(){
  int serverPort=startRpcServer(true);
  XDR request=createGetportMount();
  int headerSize=request.size();
  byte[] payload=new byte[2 * 1024 * 1024];
  request.writeFixedOpaque(payload);
  int payloadSize=request.size() - headerSize;
  testRequest(request,serverPort);
  assertEquals(payloadSize,resultSize);
}
InternalCallVerifier BooleanVerifier
/**
 * Frames are only delivered once complete: an undersized buffer and a
 * fragment shorter than its declared length must both decode to null.
 */
@Test public void testSingleFrame(){
  RpcFrameDecoder decoder=new RpcFrameDecoder();
  // A single byte is less than a frame header: nothing to decode yet.
  ByteBuffer buffer=ByteBuffer.allocate(1);
  ChannelBuffer buf=new ByteBufferBackedChannelBuffer(buffer);
  ChannelBuffer channelBuffer=(ChannelBuffer)decoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),buf);
  // assertNull/assertEquals below replace opaque assertTrue(x == y) checks.
  assertNull(channelBuffer);
  // Header declares 10 payload bytes but only 9 follow, so the frame is
  // still incomplete.
  byte[] fragment=new byte[4 + 9];
  fragment[0]=(byte)(1 << 7);
  fragment[1]=0;
  fragment[2]=0;
  fragment[3]=(byte)10;
  assertTrue(XDR.isLastFragment(fragment));
  assertEquals(10,XDR.fragmentSize(fragment));
  buffer=ByteBuffer.allocate(4 + 9);
  buffer.put(fragment);
  buffer.flip();
  buf=new ByteBufferBackedChannelBuffer(buffer);
  channelBuffer=(ChannelBuffer)decoder.decode(Mockito.mock(ChannelHandlerContext.class),Mockito.mock(Channel.class),buf);
  assertNull(channelBuffer);
}
InternalCallVerifier EqualityVerifier
/**
 * The accessors of RpcAcceptedReply must echo the constructor arguments.
 */
@Test public void testConstructor(){
  Verifier none=new VerifierNone();
  RpcAcceptedReply accepted=new RpcAcceptedReply(0,ReplyState.MSG_ACCEPTED,none,AcceptState.SUCCESS);
  assertEquals(0,accepted.getXid());
  assertEquals(RpcMessage.Type.RPC_REPLY,accepted.getMessageType());
  assertEquals(ReplyState.MSG_ACCEPTED,accepted.getState());
  assertEquals(none,accepted.getVerifier());
  assertEquals(AcceptState.SUCCESS,accepted.getAcceptState());
}
InternalCallVerifier EqualityVerifier
/**
 * The accessors of RpcCall must echo every constructor argument.
 */
@Test public void testConstructor(){
  Credentials credential=new CredentialsNone();
  Verifier verifier=new VerifierNone();
  int rpcVersion=RpcCall.RPC_VERSION;
  int program=2;
  int version=3;
  int procedure=4;
  RpcCall rpcCall=new RpcCall(0,RpcMessage.Type.RPC_CALL,rpcVersion,program,version,procedure,credential,verifier);
  assertEquals(0,rpcCall.getXid());
  assertEquals(RpcMessage.Type.RPC_CALL,rpcCall.getMessageType());
  assertEquals(rpcVersion,rpcCall.getRpcVersion());
  assertEquals(program,rpcCall.getProgram());
  assertEquals(version,rpcCall.getVersion());
  assertEquals(procedure,rpcCall.getProcedure());
  assertEquals(credential,rpcCall.getCredential());
  assertEquals(verifier,rpcCall.getVerifier());
}
InternalCallVerifier NullVerifier
/**
 * checkOrAddToCache returns null on first sight of a (client, xid) pair,
 * an in-progress entry on the second call, and a completed entry carrying
 * the response after callCompleted().
 */
@Test public void testAddRemoveEntries() throws UnknownHostException {
RpcCallCache cache=new RpcCallCache("test",100);
InetAddress clientIp=InetAddress.getByName("1.1.1.1");
int xid=100;
// First call: unseen request, so null is returned (and the pair cached).
CacheEntry e=cache.checkOrAddToCache(clientIp,xid);
assertNull(e);
// Second call: the cached entry comes back, still in progress.
e=cache.checkOrAddToCache(clientIp,xid);
validateInprogressCacheEntry(e);
// Completing the call attaches the response to the cached entry.
RpcResponse response=mock(RpcResponse.class);
cache.callCompleted(clientIp,xid,response);
e=cache.checkOrAddToCache(clientIp,xid);
validateCompletedCacheEntry(e,response);
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * A fresh CacheEntry is in progress with no response; attaching a response
 * completes it.
 */
@Test public void testCacheEntry(){
  CacheEntry entry=new CacheEntry();
  validateInprogressCacheEntry(entry);
  assertTrue(entry.isInProgress());
  assertFalse(entry.isCompleted());
  assertNull(entry.getResponse());
  RpcResponse reply=mock(RpcResponse.class);
  entry.setResponse(reply);
  validateCompletedCacheEntry(entry,reply);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises bounded-cache behaviour of RpcCallCache with capacity 10:
 * after adding 20 distinct clients only the 10 most recently added remain,
 * iteration yields them oldest-first, and re-checking a cached entry
 * reports it as still in progress.
 */
@Test public void testCacheFunctionality() throws UnknownHostException {
  RpcCallCache cache=new RpcCallCache("Test",10);
  int size=0;
  for (int clientId=0; clientId < 20; clientId++) {
    InetAddress clientIp=InetAddress.getByName("1.1.1." + clientId);
    System.out.println("Adding " + clientIp);
    cache.checkOrAddToCache(clientIp,0);
    // The cache never grows beyond its capacity of 10.
    size=Math.min(++size,10);
    System.out.println("Cache size " + cache.size());
    assertEquals(size,cache.size());
    // Oldest surviving client id once capacity has been reached.
    int startEntry=Math.max(clientId - 10 + 1,0);
    // Restored the generic type lost from this declaration ("Iterator>").
    Iterator<Map.Entry<ClientRequest,CacheEntry>> iterator=cache.iterator();
    for (int i=0; i < size; i++) {
      ClientRequest key=iterator.next().getKey();
      System.out.println("Entry " + key.getClientId());
      assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),key.getClientId());
    }
    for (int i=0; i < size; i++) {
      CacheEntry e=cache.checkOrAddToCache(InetAddress.getByName("1.1.1." + (startEntry + i)),0);
      assertNotNull(e);
      assertTrue(e.isInProgress());
      assertFalse(e.isCompleted());
    }
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testConstructor(){
// RpcDeniedReply must expose the xid, message type, reply state and
// reject state handed to its constructor.
RpcDeniedReply denied=new RpcDeniedReply(0,ReplyState.MSG_ACCEPTED,RejectState.AUTH_ERROR,new VerifierNone());
Assert.assertEquals(0,denied.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_REPLY,denied.getMessageType());
Assert.assertEquals(ReplyState.MSG_ACCEPTED,denied.getState());
Assert.assertEquals(RejectState.AUTH_ERROR,denied.getRejectState());
}
InternalCallVerifier EqualityVerifier
@Test public void testRpcMessage(){
// The base RpcMessage must retain the xid and the message type.
RpcMessage message=getRpcMessage(0,RpcMessage.Type.RPC_CALL);
Assert.assertEquals(0,message.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_CALL,message.getMessageType());
}
InternalCallVerifier EqualityVerifier
@Test public void testRpcReply(){
// RpcReply is abstract, so use a minimal anonymous subclass; write()
// is irrelevant to the accessors under test.
RpcReply reply=new RpcReply(0,ReplyState.MSG_ACCEPTED,new VerifierNone()){
@Override public XDR write( XDR xdr){
return null;
}
};
Assert.assertEquals(0,reply.getXid());
Assert.assertEquals(RpcMessage.Type.RPC_REPLY,reply.getMessageType());
Assert.assertEquals(ReplyState.MSG_ACCEPTED,reply.getState());
}
InternalCallVerifier EqualityVerifier
@Test public void testReadWrite(){
// Round-trip a CredentialsSys through XDR; UID/GID must survive.
CredentialsSys written=new CredentialsSys();
written.setUID(0);
written.setGID(1);
XDR buffer=new XDR();
written.write(buffer);
CredentialsSys read=new CredentialsSys();
read.read(buffer.asReadOnlyWrap());
assertEquals(0,read.getUID());
assertEquals(1,read.getGID());
}
InternalCallVerifier BooleanVerifier
@Test public void TestMultipleGroupsMapping() throws Exception {
// The composite provider should resolve each user's primary (first)
// group from whichever underlying provider knows the user.
Groups mapping=new Groups(conf);
String johnPrimary=mapping.getGroups(john.name).get(0);
assertTrue(johnPrimary.equals(john.group));
String hdfsPrimary=mapping.getGroups(hdfs.name).get(0);
assertTrue(hdfsPrimary.equals(hdfs.group));
}
InternalCallVerifier BooleanVerifier
@Test public void TestMultipleGroupsMappingWithoutCombined() throws Exception {
// With combining disabled, only the first provider that knows the user
// contributes groups, so jack resolves to exactly one group.
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY,"false");
Groups mapping=new Groups(conf);
assertTrue(mapping.getGroups(jack.name).size() == 1);
assertTrue(mapping.getGroups(jack.name).get(0).equals(jack.group));
}
InternalCallVerifier BooleanVerifier
@Test public void TestMultipleGroupsMappingWithCombined() throws Exception {
// With combining enabled, jack's groups are the union of both
// providers' results.
conf.set(CompositeGroupsMapping.MAPPING_PROVIDERS_COMBINED_CONFIG_KEY,"true");
Groups mapping=new Groups(conf);
assertTrue(mapping.getGroups(jack.name).size() == 2);
assertTrue(mapping.getGroups(jack.name).contains(jack.group));
assertTrue(mapping.getGroups(jack.name).contains(jack.group2));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@SuppressWarnings("unchecked") @Test public void testReadWriteStorage() throws IOException, NoSuchAlgorithmException {
Credentials ts=new Credentials();
Token token1=new Token();
Token token2=new Token();
Text service1=new Text("service1");
Text service2=new Text("service2");
Collection services=new ArrayList();
services.add(service1);
services.add(service2);
token1.setService(service1);
token2.setService(service2);
ts.addToken(new Text("sometoken1"),token1);
ts.addToken(new Text("sometoken2"),token2);
final KeyGenerator kg=KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
String alias="alias";
Map m=new HashMap(10);
for (int i=0; i < 10; i++) {
Key key=kg.generateKey();
m.put(new Text(alias + i),key.getEncoded());
ts.addSecretKey(new Text(alias + i),key.getEncoded());
}
File tmpFileName=new File(tmpDir,"tokenStorageTest");
DataOutputStream dos=new DataOutputStream(new FileOutputStream(tmpFileName));
ts.write(dos);
dos.close();
DataInputStream dis=new DataInputStream(new FileInputStream(tmpFileName));
ts=new Credentials();
ts.readFields(dis);
dis.close();
Collection> list=ts.getAllTokens();
assertEquals("getAllTokens should return collection of size 2",list.size(),2);
boolean foundFirst=false;
boolean foundSecond=false;
for ( Token extends TokenIdentifier> token : list) {
if (token.getService().equals(service1)) {
foundFirst=true;
}
if (token.getService().equals(service2)) {
foundSecond=true;
}
}
assertTrue("Tokens for services service1 and service2 must be present",foundFirst && foundSecond);
int mapLen=m.size();
assertEquals("wrong number of keys in the Storage",mapLen,ts.numberOfSecretKeys());
for ( Text a : m.keySet()) {
byte[] kTS=ts.getSecretKey(a);
byte[] kLocal=m.get(a);
assertTrue("keys don't match for " + a,WritableComparator.compareBytes(kTS,0,kTS.length,kLocal,0,kLocal.length) == 0);
}
tmpFileName.delete();
}
IterativeVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
@Test public void testAddTokensToUGI(){
// Tokens added to a UGI via addCredentials must come back, by object
// identity, from the credentials the UGI later hands out.
UserGroupInformation ugi=UserGroupInformation.createRemoteUser("someone");
Credentials added=new Credentials();
for (int i=0; i < service.length; i++) {
added.addToken(service[i],token[i]);
}
ugi.addCredentials(added);
Credentials fetched=ugi.getCredentials();
for (int i=0; i < service.length; i++) {
assertSame(token[i],fetched.getToken(service[i]));
}
assertEquals(service.length,fetched.numberOfTokens());
}
InternalCallVerifier EqualityVerifier
@Test public void addAll(){
// addAll() unions two Credentials; entries sharing a service/alias are
// overwritten by the incoming set.
Credentials base=new Credentials();
base.addToken(service[0],token[0]);
base.addToken(service[1],token[1]);
base.addSecretKey(secret[0],secret[0].getBytes());
base.addSecretKey(secret[1],secret[1].getBytes());
Credentials incoming=new Credentials();
incoming.addToken(service[0],token[3]);
incoming.addToken(service[2],token[2]);
incoming.addSecretKey(secret[0],secret[3].getBytes());
incoming.addSecretKey(secret[2],secret[2].getBytes());
base.addAll(incoming);
assertEquals(3,base.numberOfTokens());
assertEquals(3,base.numberOfSecretKeys());
// Overlapping entries were replaced by the incoming values...
assertEquals(token[3],base.getToken(service[0]));
assertEquals(secret[3],new Text(base.getSecretKey(secret[0])));
// ...while distinct entries from both sides are retained.
assertEquals(token[1],base.getToken(service[1]));
assertEquals(secret[1],new Text(base.getSecretKey(secret[1])));
assertEquals(token[2],base.getToken(service[2]));
assertEquals(secret[2],new Text(base.getSecretKey(secret[2])));
}
InternalCallVerifier EqualityVerifier
@Test public void mergeAll(){
// mergeAll() unions two Credentials but, unlike addAll(), keeps the
// existing entry when a service/alias appears on both sides.
Credentials base=new Credentials();
base.addToken(service[0],token[0]);
base.addToken(service[1],token[1]);
base.addSecretKey(secret[0],secret[0].getBytes());
base.addSecretKey(secret[1],secret[1].getBytes());
Credentials incoming=new Credentials();
incoming.addToken(service[0],token[3]);
incoming.addToken(service[2],token[2]);
incoming.addSecretKey(secret[0],secret[3].getBytes());
incoming.addSecretKey(secret[2],secret[2].getBytes());
base.mergeAll(incoming);
assertEquals(3,base.numberOfTokens());
assertEquals(3,base.numberOfSecretKeys());
// Overlapping entries kept the original values...
assertEquals(token[0],base.getToken(service[0]));
assertEquals(secret[0],new Text(base.getSecretKey(secret[0])));
// ...and distinct entries from both sides are present.
assertEquals(token[1],base.getToken(service[1]));
assertEquals(secret[1],new Text(base.getSecretKey(secret[1])));
assertEquals(token[2],base.getToken(service[2]));
assertEquals(secret[2],new Text(base.getSecretKey(secret[2])));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
@Test public void testRealUserIPNotSpecified() throws IOException {
// The real user has a group grant but no IP grant, so the proxied RPC
// must be rejected; the expected path is the catch block below.
final Configuration conf=new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),"group1");
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// NOTE(review): the <String> type argument was missing; without it the
// raw doAs() result is Object and cannot be assigned to String.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction<String>(){
@Override public String run() throws IOException {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
return proxy.aMethod();
}
});
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
// Expected: the authorization failure surfaces as an exception.
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
@Test public void testRealUserIPAuthorizationFailure() throws IOException {
// The IP grant names an address we do not connect from, so the proxied
// RPC must be rejected; the expected path is the catch block below.
final Configuration conf=new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),"20.20.20.20");
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),"group1");
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// NOTE(review): the <String> type argument was missing; without it the
// raw doAs() result is Object and cannot be assigned to String.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction<String>(){
@Override public String run() throws IOException {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
return proxy.aMethod();
}
});
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
// Expected: the authorization failure surfaces as an exception.
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testProxyWithToken() throws Exception {
// A proxy user carrying a delegation token should authenticate as the
// real user via TOKEN, chained through the SIMPLE-authenticated proxy.
final Configuration conf=new Configuration(masterConf);
TestTokenSecretManager sm=new TestTokenSecretManager();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf);
UserGroupInformation.setConfiguration(conf);
final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()),new Text("SomeSuperUser"));
// NOTE(review): generic type arguments restored; the extraction had
// stripped them, leaving raw types that break the assignments below.
Token<TestTokenIdentifier> token=new Token<TestTokenIdentifier>(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,current,GROUP_NAMES);
proxyUserUgi.addToken(token);
refreshConf(conf);
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction<String>(){
@Override public String run() throws Exception {
try {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
return proxy.aMethod();
}
catch ( Exception e) {
e.printStackTrace();
throw e;
}
finally {
// The server is torn down here so the assertion below runs against
// a fully stopped server regardless of outcome.
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
Assert.assertEquals(REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)",retVal);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testTokenBySuperUser() throws Exception {
// A user holding a delegation token issued via a super user should
// authenticate as TOKEN through that SIMPLE-authenticated super user.
TestTokenSecretManager sm=new TestTokenSecretManager();
final Configuration newConf=new Configuration(masterConf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,newConf);
UserGroupInformation.setConfiguration(newConf);
final Server server=new RPC.Builder(newConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
server.start();
final UserGroupInformation current=UserGroupInformation.createUserForTesting(REAL_USER_NAME,GROUP_NAMES);
refreshConf(newConf);
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()),new Text("SomeSuperUser"));
// NOTE(review): generic type arguments restored; the extraction had
// stripped them, leaving raw types that break the assignments below.
Token<TestTokenIdentifier> token=new Token<TestTokenIdentifier>(tokenId,sm);
SecurityUtil.setTokenService(token,addr);
current.addToken(token);
String retVal=current.doAs(new PrivilegedExceptionAction<String>(){
@Override public String run() throws Exception {
try {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,newConf);
return proxy.aMethod();
}
catch ( Exception e) {
e.printStackTrace();
throw e;
}
finally {
// Tear down inside the action so the assertion runs afterwards
// against a fully stopped server regardless of outcome.
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
String expected=REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)";
Assert.assertEquals(retVal + "!=" + expected,expected,retVal);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
@Test public void testRealUserGroupAuthorizationFailure() throws IOException {
// The group grant names a group the proxy user is not in ("group3"),
// so the proxied RPC must be rejected; the catch block is the expected path.
final Configuration conf=new Configuration();
configureSuperUserIPAddresses(conf,REAL_USER_SHORT_NAME);
conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),"group3");
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
refreshConf(conf);
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// NOTE(review): the <String> type argument was missing; without it the
// raw doAs() result is Object and cannot be assigned to String.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction<String>(){
@Override public String run() throws IOException {
proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
return proxy.aMethod();
}
});
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
// Expected: the authorization failure surfaces as an exception.
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier
@Test public void testRealUserGroupNotSpecified() throws IOException {
// Only an IP grant is configured — no group grant — so the proxied RPC
// must be rejected; the catch block is the expected path.
final Configuration conf=new Configuration();
configureSuperUserIPAddresses(conf,REAL_USER_SHORT_NAME);
Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build();
try {
server.start();
final InetSocketAddress addr=NetUtils.getConnectAddress(server);
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,realUserUgi,GROUP_NAMES);
// NOTE(review): the <String> type argument was missing; without it the
// raw doAs() result is Object and cannot be assigned to String.
String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction<String>(){
@Override public String run() throws IOException {
proxy=(TestProtocol)RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf);
return proxy.aMethod();
}
});
Assert.fail("The RPC must have failed " + retVal);
}
catch ( Exception e) {
// Expected: the authorization failure surfaces as an exception.
e.printStackTrace();
}
finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Test method for{@link org.apache.hadoop.security.UserGroupInformation#createProxyUser(java.lang.String,org.apache.hadoop.security.UserGroupInformation)}.
 */
@Test public void testCreateProxyUser() throws Exception {
// createProxyUser must yield a UGI whose doAs() runs as the proxy user
// and whose toString() reflects the PROXY-via-SIMPLE chain.
UserGroupInformation realUserUgi=UserGroupInformation.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUser(PROXY_USER_NAME,realUserUgi);
// NOTE(review): the <UserGroupInformation> type argument was missing;
// the raw doAs() result could not be assigned to UserGroupInformation.
UserGroupInformation curUGI=proxyUserUgi.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
@Override public UserGroupInformation run() throws IOException {
return UserGroupInformation.getCurrentUser();
}
});
Assert.assertEquals(PROXY_USER_NAME + " (auth:PROXY) via " + REAL_USER_NAME+ " (auth:SIMPLE)",curUGI.toString());
}
InternalCallVerifier BooleanVerifier
@Test public void testGroupShell() throws Exception {
// The shell-based mapping must return at least one group for the
// current OS user.
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
Groups groups=new Groups(conf);
String username=System.getProperty("user.name");
// NOTE(review): List element type restored — the raw type had been
// left by stripped generics.
List<String> groupList=groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testNetgroupWithFallback() throws Exception {
// The JNI netgroup mapping with fallback must resolve groups whether or
// not the native library is available.
LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " + "test the normal path and 'mvn -DTestGroupFallback clear test' will" + " test the fall back functionality");
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMappingWithFallback");
Groups groups=new Groups(conf);
String username=System.getProperty("user.name");
// NOTE(review): List element type restored (stripped generics).
List<String> groupList=groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testNetgroupShell() throws Exception {
// The shell-based netgroup mapping must return at least one group for
// the current OS user.
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping");
Groups groups=new Groups(conf);
String username=System.getProperty("user.name");
// NOTE(review): List element type restored (stripped generics).
List<String> groupList=groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
InternalCallVerifier BooleanVerifier
@Test public void testGroupWithFallback() throws Exception {
// The JNI group mapping with fallback must resolve groups whether or
// not the native library is available.
LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " + "test the normal path and 'mvn -DTestGroupFallback clear test' will" + " test the fall back functionality");
Logger.getRootLogger().setLevel(Level.DEBUG);
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,"org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");
Groups groups=new Groups(conf);
String username=System.getProperty("user.name");
// NOTE(review): List element type restored (stripped generics).
List<String> groupList=groups.getGroups(username);
LOG.info(username + " has GROUPS: " + groupList.toString());
assertTrue(groupList.size() > 0);
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testGroupsCaching() throws Exception {
// Disable negative caching so blacklisting behaviour is observed directly.
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,0);
Groups groups=new Groups(conf);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.clearBlackList();
FakeGroupMapping.addToBlackList("user1");
// "me" resolves normally while only "user1" is blacklisted.
assertTrue(groups.getGroups("me").size() == 2);
// Blacklisting "me" afterwards must not matter: the cached result serves.
FakeGroupMapping.addToBlackList("me");
assertTrue(groups.getGroups("me").size() == 2);
try {
LOG.error("We are not supposed to get here." + groups.getGroups("user1").toString());
// Give fail() a diagnostic message instead of failing silently.
fail("group lookup for blacklisted user1 should have thrown IOException");
}
catch ( IOException ioe) {
if (!ioe.getMessage().startsWith("No groups found")) {
LOG.error("Got unexpected exception: " + ioe.getMessage());
fail("unexpected exception message: " + ioe.getMessage());
}
}
// Once the blacklist is cleared the lookup succeeds again.
FakeGroupMapping.clearBlackList();
assertTrue(groups.getGroups("user1").size() == 2);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testNegativeGroupCaching() throws Exception {
// With a 2-second negative cache, failed lookups stay failed (even after
// the mapping recovers) until the cache entry expires.
final String user="negcache";
final String failMessage="Did not throw IOException: ";
conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,2);
FakeTimer timer=new FakeTimer();
Groups groups=new Groups(conf,timer);
groups.cacheGroupsAdd(Arrays.asList(myGroups));
groups.refresh();
FakeGroupMapping.addToBlackList(user);
// First lookup fails against the mapping itself.
expectNoGroupsFound(groups,user,failMessage + "Failed to obtain groups from FakeGroupMapping.");
// Second lookup fails out of the negative cache.
expectNoGroupsFound(groups,user,failMessage + "The user is in the negative cache.");
FakeGroupMapping.clearBlackList();
// Still failing: the negative cache entry has not expired yet.
expectNoGroupsFound(groups,user,failMessage + "The user is still in the negative cache, even " + "FakeGroupMapping has resumed.");
// Advance past the 2-second negative-cache TTL; lookup now succeeds.
timer.advance(4 * 1000);
assertEquals(Arrays.asList(myGroups),groups.getGroups(user));
}
/** Asserts that a group lookup for {@code user} fails with "No groups found". */
private static void expectNoGroupsFound(Groups groups,String user,String message) throws IOException {
try {
groups.getGroups(user);
fail(message);
}
catch ( IOException e) {
GenericTestUtils.assertExceptionContains("No groups found for user",e);
}
}
InternalCallVerifier BooleanVerifier
@Test public void testGroupLookupForStaticUsers() throws Exception {
// Users listed in the static-override config must be resolved from the
// override alone, without ever invoking the configured mapping class.
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,FakeunPrivilegedGroupMapping.class,ShellBasedUnixGroupsMapping.class);
conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,"me=;user1=group1;user2=group1,group2");
Groups groups=new Groups(conf);
// NOTE(review): List/ArrayList element types restored (stripped generics).
List<String> userGroups=groups.getGroups("me");
assertTrue("non-empty groups for static user",userGroups.isEmpty());
assertFalse("group lookup done for static user",FakeunPrivilegedGroupMapping.invoked);
List<String> expected=new ArrayList<String>();
expected.add("group1");
FakeunPrivilegedGroupMapping.invoked=false;
userGroups=groups.getGroups("user1");
// assertEquals reports both lists on failure, unlike assertTrue(equals).
assertEquals("groups not correct",expected,userGroups);
assertFalse("group lookup done for unprivileged user",FakeunPrivilegedGroupMapping.invoked);
expected.add("group2");
FakeunPrivilegedGroupMapping.invoked=false;
userGroups=groups.getGroups("user2");
assertEquals("groups not correct",expected,userGroups);
assertFalse("group lookup done for unprivileged user",FakeunPrivilegedGroupMapping.invoked);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testConfGetPassword() throws Exception {
// Store LDAP bind/keystore passwords in a JavaKeyStore credential
// provider and verify LdapGroupsMapping reads them back via the conf.
File testDir=new File(System.getProperty("test.build.data","target/test-dir"));
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir+ "/test.jks";
File file=new File(testDir,"test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,ourUrl);
CredentialProvider provider=CredentialProviderFactory.getProviders(conf).get(0);
char[] bindpass="bindpass".toCharArray();
char[] storepass="storepass".toCharArray();
// Nothing has been stored yet.
assertEquals(null,provider.getCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY));
assertEquals(null,provider.getCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY));
try {
provider.createCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY,bindpass);
provider.createCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,storepass);
provider.flush();
}
catch ( Exception e) {
e.printStackTrace();
throw e;
}
// The provider must return exactly what was stored.
assertArrayEquals(bindpass,provider.getCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY).getCredential());
assertArrayEquals(storepass,provider.getCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY).getCredential());
// LdapGroupsMapping resolves the same aliases through the Configuration.
LdapGroupsMapping mapping=new LdapGroupsMapping();
Assert.assertEquals("bindpass",mapping.getPassword(conf,LdapGroupsMapping.BIND_PASSWORD_KEY,""));
Assert.assertEquals("storepass",mapping.getPassword(conf,LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,""));
Assert.assertEquals("",mapping.getPassword(conf,"invalid-alias",""));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testFilePermission() throws Exception {
// End-to-end permission checks against a mini HDFS cluster: missing-file
// errors, umask on create, setPermission, and unprivileged-user access.
final Configuration conf=new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,true);
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
try {
FileSystem nnfs=FileSystem.get(conf);
assertFalse(nnfs.exists(CHILD_FILE1));
// setOwner/setPermission on a missing file must fail fast.
try {
nnfs.setOwner(CHILD_FILE1,"foo","bar");
assertTrue("setOwner should have thrown FileNotFoundException",false);
}
catch ( java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
try {
nnfs.setPermission(CHILD_FILE1,new FsPermission((short)0777));
assertTrue("setPermission should have thrown FileNotFoundException",false);
}
catch ( java.io.FileNotFoundException e) {
LOG.info("GOOD: got " + e);
}
// create() applies the umask to the requested 0777 permission.
FSDataOutputStream out=nnfs.create(CHILD_FILE1,new FsPermission((short)0777),true,1024,(short)1,1024,null);
FileStatus status=nnfs.getFileStatus(CHILD_FILE1);
assertEquals("rwxr-xr-x",status.getPermission().toString());
// Close before deleting so the stream is not leaked.
out.close();
nnfs.delete(CHILD_FILE1,false);
nnfs.mkdirs(CHILD_DIR1);
// Default create permission is rw-r--r--.
out=nnfs.create(CHILD_FILE1);
status=nnfs.getFileStatus(CHILD_FILE1);
assertEquals("rw-r--r--",status.getPermission().toString());
byte data[]=new byte[FILE_LEN];
RAN.nextBytes(data);
out.write(data);
out.close();
nnfs.setPermission(CHILD_FILE1,new FsPermission("700"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertEquals("rwx------",status.getPermission().toString());
// Read the data back and verify content integrity.
byte dataIn[]=new byte[FILE_LEN];
FSDataInputStream fin=nnfs.open(CHILD_FILE1);
int bytesRead=fin.read(dataIn);
fin.close();
assertTrue(bytesRead == FILE_LEN);
for (int i=0; i < FILE_LEN; i++) {
assertEquals(data[i],dataIn[i]);
}
// setPermission round-trips through several modes.
nnfs.setPermission(CHILD_FILE1,new FsPermission("755"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertEquals("rwxr-xr-x",status.getPermission().toString());
nnfs.setPermission(CHILD_FILE1,new FsPermission("744"));
status=nnfs.getFileStatus(CHILD_FILE1);
assertEquals("rwxr--r--",status.getPermission().toString());
nnfs.setPermission(CHILD_FILE1,new FsPermission("700"));
// An unprivileged user may not create or open under restricted paths.
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
FileSystem userfs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf);
userfs.mkdirs(CHILD_DIR1);
assertFalse(canMkdirs(userfs,CHILD_DIR2));
assertFalse(canCreate(userfs,CHILD_FILE2));
assertFalse(canOpen(userfs,CHILD_FILE1));
// Loosen permissions and verify rename becomes possible.
nnfs.setPermission(ROOT_PATH,new FsPermission((short)0755));
nnfs.setPermission(CHILD_DIR1,new FsPermission("777"));
nnfs.setPermission(new Path("/"),new FsPermission((short)0777));
final Path RENAME_PATH=new Path("/foo/bar");
userfs.mkdirs(RENAME_PATH);
assertTrue(canRename(userfs,RENAME_PATH,CHILD_DIR1));
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Tests backward compatibility. Configuration can be
 * either set with old param dfs.umask that takes decimal umasks
 * or dfs.umaskmode that takes symbolic or octal umask.
 */
@Test public void testBackwardCompatibility(){
// New-style API: umask set programmatically (18 decimal == 022 octal).
FsPermission perm=new FsPermission((short)18);
Configuration conf=new Configuration();
FsPermission.setUMask(conf,perm);
assertEquals(18,FsPermission.getUMask(conf).toShort());
// Deprecated decimal dfs.umask key is still honored.
// (A dead reassignment of `perm` was removed here — it was never read.)
conf=new Configuration();
conf.set(FsPermission.DEPRECATED_UMASK_LABEL,"18");
assertEquals(18,FsPermission.getUMask(conf).toShort());
// When both keys are present, the deprecated key takes precedence.
conf=new Configuration();
conf.set(FsPermission.DEPRECATED_UMASK_LABEL,"18");
conf.set(FsPermission.UMASK_LABEL,"000");
assertEquals(18,FsPermission.getUMask(conf).toShort());
// Octal new-style key alone: "022" == 18 decimal.
conf=new Configuration();
conf.set(FsPermission.UMASK_LABEL,"022");
assertEquals(18,FsPermission.getUMask(conf).toShort());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Test HADOOP_PROXY_USER for impersonation
*/
@Test public void testProxyUserFromEnvironment() throws IOException {
String proxyUser="foo.bar";
System.setProperty(UserGroupInformation.HADOOP_PROXY_USER,proxyUser);
UserGroupInformation ugi=UserGroupInformation.getLoginUser();
assertEquals(proxyUser,ugi.getUserName());
UserGroupInformation realUgi=ugi.getRealUser();
assertNotNull(realUgi);
Process pp=Runtime.getRuntime().exec("whoami");
BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String realUser=br.readLine().trim();
int backslashIndex=realUser.indexOf('\\');
if (backslashIndex != -1) {
realUser=realUser.substring(backslashIndex + 1);
}
assertEquals(realUser,realUgi.getUserName());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testGroupMappingRefresh() throws Exception {
// Verifies that -refreshUserToGroupsMappings invalidates the group cache
// and that the cache also expires on its own after the timeout.
DFSAdmin admin=new DFSAdmin(config);
String[] args=new String[]{"-refreshUserToGroupsMappings"};
Groups groups=Groups.getUserToGroupsMappingService(config);
String user=UserGroupInformation.getCurrentUser().getUserName();
// NOTE(review): each list is now printed directly; the original reused a
// single array sized from g1, so later (possibly larger) lists were
// printed from a stale array.
System.out.println("first attempt:");
List<String> g1=groups.getGroups(user);
System.out.println(g1);
System.out.println("second attempt, should be same:");
List<String> g2=groups.getGroups(user);
System.out.println(g2);
for (int i=0; i < g2.size(); i++) {
assertEquals("Should be same group ",g1.get(i),g2.get(i));
}
// The refresh command must invalidate the cached groups.
admin.run(args);
System.out.println("third attempt(after refresh command), should be different:");
List<String> g3=groups.getGroups(user);
System.out.println(g3);
for (int i=0; i < g3.size(); i++) {
assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
}
// Waiting past the cache timeout must also refresh the groups.
Thread.sleep(groupRefreshTimeoutSec * 1100);
System.out.println("fourth attempt(after timeout), should be different:");
List<String> g4=groups.getGroups(user);
System.out.println(g4);
for (int i=0; i < g4.size(); i++) {
assertFalse("Should be different group ",g3.get(i).equals(g4.get(i)));
}
}
UtilityVerifier InternalCallVerifier
@Test public void testRefreshSuperUserGroupsConfiguration() throws Exception {
// Verifies that "-refreshSuperUserGroupsConfiguration" swaps the
// proxy-user ACLs at runtime: before the refresh only ugi2 (gr3/gr4) is
// authorized; after loading a config that allows only gr2, only ugi1 is.
final String SUPER_USER="super_user";
final String[] GROUP_NAMES1=new String[]{"gr1","gr2"};
final String[] GROUP_NAMES2=new String[]{"gr3","gr4"};
String userKeyGroups=DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(SUPER_USER);
String userKeyHosts=DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(SUPER_USER);
// Initial ACL: super_user may impersonate members of gr3/gr4/gr5
// connecting from 127.0.0.1.
config.set(userKeyGroups,"gr3,gr4,gr5");
config.set(userKeyHosts,"127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
// Two proxied users sharing the same (mocked) real super user:
// ugi1 is in gr1/gr2, ugi2 is in gr3/gr4.
UserGroupInformation ugi1=mock(UserGroupInformation.class);
UserGroupInformation ugi2=mock(UserGroupInformation.class);
UserGroupInformation suUgi=mock(UserGroupInformation.class);
when(ugi1.getRealUser()).thenReturn(suUgi);
when(ugi2.getRealUser()).thenReturn(suUgi);
when(suUgi.getShortUserName()).thenReturn(SUPER_USER);
when(suUgi.getUserName()).thenReturn(SUPER_USER + "L");
when(ugi1.getShortUserName()).thenReturn("user1");
when(ugi2.getShortUserName()).thenReturn("user2");
when(ugi1.getUserName()).thenReturn("userL1");
when(ugi2.getUserName()).thenReturn("userL2");
when(ugi1.getGroupNames()).thenReturn(GROUP_NAMES1);
when(ugi2.getGroupNames()).thenReturn(GROUP_NAMES2);
// Under the initial ACL, ugi1 (gr1/gr2) must be denied...
try {
ProxyUsers.authorize(ugi1,"127.0.0.1");
fail("first auth for " + ugi1.getShortUserName() + " should've failed ");
}
catch ( AuthorizationException e) {
System.err.println("auth for " + ugi1.getUserName() + " failed");
}
// ...while ugi2 (gr3/gr4) must be allowed.
try {
ProxyUsers.authorize(ugi2,"127.0.0.1");
System.err.println("auth for " + ugi2.getUserName() + " succeeded");
}
catch ( AuthorizationException e) {
fail("first auth for " + ugi2.getShortUserName() + " should've succeeded: "+ e.getLocalizedMessage());
}
// Load a new ACL (only gr2 allowed) and refresh via DFSAdmin.
String rsrc="testGroupMappingRefresh_rsrc.xml";
addNewConfigResource(rsrc,userKeyGroups,"gr2",userKeyHosts,"127.0.0.1");
DFSAdmin admin=new DFSAdmin(config);
String[] args=new String[]{"-refreshSuperUserGroupsConfiguration"};
admin.run(args);
// After the refresh the authorization outcomes must be inverted.
try {
ProxyUsers.authorize(ugi2,"127.0.0.1");
fail("second auth for " + ugi2.getShortUserName() + " should've failed ");
}
catch ( AuthorizationException e) {
System.err.println("auth for " + ugi2.getUserName() + " failed");
}
try {
ProxyUsers.authorize(ugi1,"127.0.0.1");
System.err.println("auth for " + ugi1.getUserName() + " succeeded");
}
catch ( AuthorizationException e) {
fail("second auth for " + ugi1.getShortUserName() + " should've succeeded: "+ e.getLocalizedMessage());
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGetAuthenticationMethod() {
  // Exercises SecurityUtil.getAuthenticationMethod() for the default,
  // "simple", "kerberos", and an invalid configuration value.
  Configuration configuration = new Configuration();

  // Unset -> defaults to SIMPLE.
  configuration.unset(HADOOP_SECURITY_AUTHENTICATION);
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(configuration));

  // Explicit values map to their enum constants.
  configuration.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(configuration));
  configuration.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(configuration));

  // An unknown value must raise IllegalArgumentException with this message.
  configuration.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
  String error = null;
  try {
    SecurityUtil.getAuthenticationMethod(configuration);
  } catch (Exception e) {
    error = e.toString();
  }
  assertEquals("java.lang.IllegalArgumentException: "
      + "Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION
      + " of kaboom", error);
}
InternalCallVerifier EqualityVerifier
@Test public void testSetAuthenticationMethod() {
  // null is treated as SIMPLE; SIMPLE and KERBEROS write their
  // lowercase names into the configuration.
  Configuration c = new Configuration();
  SecurityUtil.setAuthenticationMethod(null, c);
  assertEquals("simple", c.get(HADOOP_SECURITY_AUTHENTICATION));
  SecurityUtil.setAuthenticationMethod(SIMPLE, c);
  assertEquals("simple", c.get(HADOOP_SECURITY_AUTHENTICATION));
  SecurityUtil.setAuthenticationMethod(KERBEROS, c);
  assertEquals("kerberos", c.get(HADOOP_SECURITY_AUTHENTICATION));
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Logs in from a KDC keytab and verifies that a UGI rebuilt from the
// resulting Subject carries the same Kerberos principal inside doAs().
// NOTE(review): requires the "kdc.resource.dir" system property and a
// reachable KDC — presumably set by the test harness; confirm.
@Test public void testGetUGIFromKerberosSubject() throws IOException {
String user1keyTabFilepath=System.getProperty("kdc.resource.dir") + "/keytabs/user1.keytab";
UserGroupInformation ugi=UserGroupInformation.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",user1keyTabFilepath);
Set principals=ugi.getSubject().getPrincipals(KerberosPrincipal.class);
if (principals.isEmpty()) {
// Keytab login must leave a KerberosPrincipal in the Subject.
Assert.fail("There should be a kerberos principal in the subject.");
}
else {
UserGroupInformation ugi2=UserGroupInformation.getUGIFromSubject(ugi.getSubject());
if (ugi2 != null) {
// Inside doAs(), the current user must be the keytab principal.
ugi2.doAs(new PrivilegedAction(){
@Override public Object run(){
try {
UserGroupInformation ugi3=UserGroupInformation.getCurrentUser();
String doAsUserName=ugi3.getUserName();
assertEquals(doAsUserName,"user1@EXAMPLE.COM");
System.out.println("DO AS USERNAME: " + doAsUserName);
}
catch ( IOException e) {
e.printStackTrace();
}
return null;
}
}
);
}
}
}
InternalCallVerifier IdentityVerifier
// getCredentials() must return a snapshot: mutating the returned
// Credentials must not alter the UGI's own token set.
@SuppressWarnings("unchecked") @Test(timeout=30000) public void testGetCreds() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createRemoteUser("someone");
Text service=new Text("service");
Token t1=mock(Token.class);
when(t1.getService()).thenReturn(service);
Token t2=mock(Token.class);
when(t2.getService()).thenReturn(new Text("service2"));
Token t3=mock(Token.class);
when(t3.getService()).thenReturn(service);
// UGI holds exactly t1 and t2.
ugi.addToken(t1);
ugi.addToken(t2);
checkTokens(ugi,t1,t2);
Credentials creds=ugi.getCredentials();
// Overwrite the "service" entry in the snapshot only.
creds.addToken(t3.getService(),t3);
assertSame(t3,creds.getToken(service));
// The UGI itself is unchanged by mutating the snapshot.
checkTokens(ugi,t1,t2);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * given user name - get all the groups.
 * Needs to happen before creating the test users
 */
@Test(timeout=30000) public void testGetServerSideGroups() throws IOException, InterruptedException {
// Determine the actual OS user by shelling out to "whoami".
Process pp=Runtime.getRuntime().exec("whoami");
BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String userName=br.readLine().trim();
if (Shell.WINDOWS) {
// Windows reports DOMAIN\user; strip the domain and lower-case the name.
int sp=userName.lastIndexOf('\\');
if (sp != -1) {
userName=userName.substring(sp + 1);
}
userName=userName.toLowerCase();
}
// Collect the OS-reported group list for that user.
pp=Runtime.getRuntime().exec(Shell.WINDOWS ? Shell.WINUTILS + " groups -F" : "id -Gn");
br=new BufferedReader(new InputStreamReader(pp.getInputStream()));
String line=br.readLine();
System.out.println(userName + ":" + line);
Set groups=new LinkedHashSet();
String[] tokens=line.split(Shell.TOKEN_SEPARATOR_REGEX);
for ( String s : tokens) {
groups.add(s);
}
// The login UGI must agree with the OS on both user name and groups.
final UserGroupInformation login=UserGroupInformation.getCurrentUser();
String loginUserName=login.getShortUserName();
if (Shell.WINDOWS) {
loginUserName=loginUserName.toLowerCase();
}
assertEquals(userName,loginUserName);
String[] gi=login.getGroupNames();
assertEquals(groups.size(),gi.length);
for (int i=0; i < gi.length; i++) {
assertTrue(groups.contains(gi[i]));
}
// A fabricated remote user is distinct from the login user and has no groups.
final UserGroupInformation fakeUser=UserGroupInformation.createRemoteUser("foo.bar");
fakeUser.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
UserGroupInformation current=UserGroupInformation.getCurrentUser();
assertFalse(current.equals(login));
assertEquals(current,fakeUser);
assertEquals(0,current.getGroupNames().length);
return null;
}
}
);
}
UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
@SuppressWarnings("unchecked") @Test(timeout=30000) public void testUGITokens() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"});
Token t1=mock(Token.class);
when(t1.getService()).thenReturn(new Text("t1"));
Token t2=mock(Token.class);
when(t2.getService()).thenReturn(new Text("t2"));
Credentials creds=new Credentials();
byte[] secretKey=new byte[]{};
Text secretName=new Text("shhh");
creds.addSecretKey(secretName,secretKey);
ugi.addToken(t1);
ugi.addToken(t2);
ugi.addCredentials(creds);
Collection> z=ugi.getTokens();
assertTrue(z.contains(t1));
assertTrue(z.contains(t2));
assertEquals(2,z.size());
Credentials ugiCreds=ugi.getCredentials();
assertSame(secretKey,ugiCreds.getSecretKey(secretName));
assertEquals(1,ugiCreds.numberOfSecretKeys());
try {
z.remove(t1);
fail("Shouldn't be able to modify token collection from UGI");
}
catch ( UnsupportedOperationException uoe) {
}
Collection> otherSet=ugi.doAs(new PrivilegedExceptionAction>>(){
@Override public Collection> run() throws IOException {
return UserGroupInformation.getCurrentUser().getTokens();
}
}
);
assertTrue(otherSet.contains(t1));
assertTrue(otherSet.contains(t2));
}
InternalCallVerifier BooleanVerifier
/**
 * Test hasSufficientTimeElapsed method
 */
@Test(timeout=30000) public void testHasSufficientTimeElapsed() throws Exception {
// hasSufficientTimeElapsed(long) is private; invoke it via reflection.
Method method=UserGroupInformation.class.getDeclaredMethod("hasSufficientTimeElapsed",long.class);
method.setAccessible(true);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
User user=ugi.getSubject().getPrincipals(User.class).iterator().next();
long now=System.currentTimeMillis();
// With the default minimum relogin interval (presumably 60s — TODO
// confirm), a login 2 minutes ago is sufficient, 30 seconds ago is not.
user.setLastLogin(now - 2 * 60 * 1000);
assertTrue((Boolean)method.invoke(ugi,now));
user.setLastLogin(now - 30 * 1000);
assertFalse((Boolean)method.invoke(ugi,now));
// Raise the minimum to 10 minutes and re-check both sides of the boundary.
Configuration conf2=new Configuration(conf);
conf2.setLong(CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN,10 * 60);
UserGroupInformation.setConfiguration(conf2);
user.setLastLogin(now - 15 * 60 * 1000);
assertTrue((Boolean)method.invoke(ugi,now));
user.setLastLogin(now - 6 * 60 * 1000);
assertFalse((Boolean)method.invoke(ugi,now));
// Restore the global configuration and the method's accessibility.
UserGroupInformation.setConfiguration(conf);
method.setAccessible(false);
}
InternalCallVerifier BooleanVerifier
@Test(timeout=30000) public void testLoginModuleCommit() throws Exception {
  // Logging out and back in must not replace the User principal instance
  // held in the login UGI's Subject.
  UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
  User before = loginUser.getSubject().getPrincipals(User.class).iterator().next();
  LoginContext ctx = before.getLogin();
  ctx.logout();
  ctx.login();
  User after = loginUser.getSubject().getPrincipals(User.class).iterator().next();
  Assert.assertTrue(before == after);
}
InternalCallVerifier IdentityVerifier
@SuppressWarnings("unchecked") @Test(timeout=30000) public void testAddNamedToken() throws Exception {
  // One token may be registered under several aliases; each alias must
  // resolve back to the identical Token instance.
  UserGroupInformation someone = UserGroupInformation.createRemoteUser("someone");
  Token token = mock(Token.class);
  Text aliasA = new Text("t1");
  Text aliasB = new Text("t2");
  when(token.getService()).thenReturn(aliasA);
  someone.addToken(aliasA, token);
  assertSame(token, someone.getCredentials().getToken(aliasA));
  someone.addToken(aliasB, token);
  assertSame(token, someone.getCredentials().getToken(aliasA));
  assertSame(token, someone.getCredentials().getToken(aliasB));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test login method
 */
@Test(timeout=30000) public void testLogin() throws Exception {
// Enable UGI metrics percentiles so verifyGroupMetrics() can observe them.
conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,String.valueOf(PERCENTILES_INTERVAL));
UserGroupInformation.setConfiguration(conf);
// The current user is the login user and belongs to at least one group.
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
assertEquals(UserGroupInformation.getCurrentUser(),UserGroupInformation.getLoginUser());
assertTrue(ugi.getGroupNames().length >= 1);
verifyGroupMetrics(1);
// Inside doAs(), the current user is the test user, not the login user.
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
UserGroupInformation curUGI=userGroupInfo.doAs(new PrivilegedExceptionAction(){
@Override public UserGroupInformation run() throws IOException {
return UserGroupInformation.getCurrentUser();
}
}
);
assertEquals(curUGI,userGroupInfo);
assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
}
InternalCallVerifier EqualityVerifier
/**
 * In some scenario, such as HA, delegation tokens are associated with a
 * logical name. The tokens are cloned and are associated with the
 * physical address of the server where the service is provided.
 * This test ensures cloned delegated tokens are locally used
 * and are not returned in {@link UserGroupInformation#getCredentials()}
 */
@Test public void testPrivateTokenExclusion() throws Exception {
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
TestTokenIdentifier tokenId=new TestTokenIdentifier();
Token token=new Token(tokenId.getBytes(),"password".getBytes(),tokenId.getKind(),null);
// One regular token plus two private clones of it.
ugi.addToken(new Text("regular-token"),token);
ugi.addToken(new Text("private-token"),new Token.PrivateToken(token));
ugi.addToken(new Text("private-token1"),new Token.PrivateToken(token));
// FIX: restored the stripped generic type arguments
// ("Collection> tokens" does not compile).
Collection<Token<? extends TokenIdentifier>> tokens=ugi.getCredentials().getAllTokens();
// Only the regular token may appear in getCredentials().
assertEquals(1,tokens.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=30000) public void testGettingGroups() throws Exception {
  // createUserForTesting must report exactly the user name and groups given.
  UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertEquals(USER_NAME, testUgi.getUserName());
  String[] expectedGroups = {GROUP1_NAME, GROUP2_NAME, GROUP3_NAME};
  assertArrayEquals(expectedGroups, testUgi.getGroupNames());
}
InternalCallVerifier IdentityVerifier
@Test(timeout=30000) public void testGetCredsNotSame() throws Exception {
  // getCredentials() must hand out a fresh Credentials object per call.
  UserGroupInformation someone = UserGroupInformation.createRemoteUser("someone");
  Credentials first = someone.getCredentials();
  assertNotSame(first, someone.getCredentials());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A proxy user always reports PROXY as its own auth method while the real
// user's method is exposed via getRealAuthenticationMethod(); also, UGIs
// built over the same Subject compare equal.
@Test(timeout=30000) public void testUGIAuthMethodInRealUser() throws Exception {
final UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi=UserGroupInformation.createProxyUser("proxy",ugi);
final AuthenticationMethod am=AuthenticationMethod.KERBEROS;
ugi.setAuthenticationMethod(am);
Assert.assertEquals(am,ugi.getAuthenticationMethod());
Assert.assertEquals(AuthenticationMethod.PROXY,proxyUgi.getAuthenticationMethod());
Assert.assertEquals(am,UserGroupInformation.getRealAuthenticationMethod(proxyUgi));
// The same holds from within doAs() on the proxy user.
proxyUgi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
Assert.assertEquals(AuthenticationMethod.PROXY,UserGroupInformation.getCurrentUser().getAuthenticationMethod());
Assert.assertEquals(am,UserGroupInformation.getCurrentUser().getRealUser().getAuthenticationMethod());
return null;
}
}
);
// Same Subject => equal UGIs, with and without an explicit PROXY setting.
UserGroupInformation proxyUgi2=new UserGroupInformation(proxyUgi.getSubject());
proxyUgi2.setAuthenticationMethod(AuthenticationMethod.PROXY);
Assert.assertEquals(proxyUgi,proxyUgi2);
UserGroupInformation realugi=UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi3=UserGroupInformation.createProxyUser("proxyAnother",realugi);
UserGroupInformation proxyUgi4=new UserGroupInformation(proxyUgi3.getSubject());
Assert.assertEquals(proxyUgi3,proxyUgi4);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testEqualsWithRealUser() throws Exception {
  // Two proxy UGIs sharing one Subject are equal; a plain remote user
  // with the same name is not equal to a proxy user.
  UserGroupInformation real = UserGroupInformation.createUserForTesting("RealUser", GROUP_NAMES);
  UserGroupInformation proxyA = UserGroupInformation.createProxyUser(USER_NAME, real);
  UserGroupInformation proxyB = new UserGroupInformation(proxyA.getSubject());
  UserGroupInformation remote = UserGroupInformation.createRemoteUser(USER_NAME);
  assertEquals(proxyA, proxyB);
  assertFalse(remote.equals(proxyA));
}
InternalCallVerifier EqualityVerifier
// The auth method set on a UGI must be visible both directly and from
// within a doAs() block on that UGI.
@Test(timeout=30000) public void testUGIAuthMethod() throws Exception {
final UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
final AuthenticationMethod am=AuthenticationMethod.KERBEROS;
ugi.setAuthenticationMethod(am);
Assert.assertEquals(am,ugi.getAuthenticationMethod());
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Object run() throws IOException {
Assert.assertEquals(am,UserGroupInformation.getCurrentUser().getAuthenticationMethod());
return null;
}
}
);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testCreateRemoteUser() {
  // Default auth method is SIMPLE; an explicit AuthMethod is honored,
  // and both are reflected in toString().
  UserGroupInformation remote = UserGroupInformation.createRemoteUser("user1");
  assertEquals(AuthenticationMethod.SIMPLE, remote.getAuthenticationMethod());
  assertTrue(remote.toString().contains("(auth:SIMPLE)"));
  remote = UserGroupInformation.createRemoteUser("user1", AuthMethod.KERBEROS);
  assertEquals(AuthenticationMethod.KERBEROS, remote.getAuthenticationMethod());
  assertTrue(remote.toString().contains("(auth:KERBEROS)"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testTokenIdentifiers() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"});
TokenIdentifier t1=mock(TokenIdentifier.class);
TokenIdentifier t2=mock(TokenIdentifier.class);
ugi.addTokenIdentifier(t1);
ugi.addTokenIdentifier(t2);
Collection z=ugi.getTokenIdentifiers();
assertTrue(z.contains(t1));
assertTrue(z.contains(t2));
assertEquals(2,z.size());
Collection otherSet=ugi.doAs(new PrivilegedExceptionAction>(){
@Override public Collection run() throws IOException {
return UserGroupInformation.getCurrentUser().getTokenIdentifiers();
}
}
);
assertTrue(otherSet.contains(t1));
assertTrue(otherSet.contains(t2));
assertEquals(2,otherSet.size());
}
InternalCallVerifier IdentityVerifier
// addCredentials() must merge both tokens and secret keys into the UGI,
// and the secret-key bytes must be stored by reference.
@SuppressWarnings("unchecked") @Test(timeout=30000) public void testAddCreds() throws Exception {
UserGroupInformation ugi=UserGroupInformation.createRemoteUser("someone");
Text service=new Text("service");
Token t1=mock(Token.class);
when(t1.getService()).thenReturn(service);
Token t2=mock(Token.class);
when(t2.getService()).thenReturn(new Text("service2"));
byte[] secret=new byte[]{};
Text secretKey=new Text("sshhh");
// Build Credentials carrying both tokens and one secret key.
Credentials creds=new Credentials();
creds.addToken(t1.getService(),t1);
creds.addToken(t2.getService(),t2);
creds.addSecretKey(secretKey,secret);
ugi.addCredentials(creds);
// Both tokens and the secret key are now visible through the UGI.
checkTokens(ugi,t1,t2);
assertSame(secret,ugi.getCredentials().getSecretKey(secretKey));
}
InternalCallVerifier BooleanVerifier
@Test(timeout=30000) public void testLoginObjectInSubject() throws Exception {
  // Two UGIs wrapping the same Subject must share the same LoginContext.
  UserGroupInformation original = UserGroupInformation.getLoginUser();
  UserGroupInformation wrapped = new UserGroupInformation(original.getSubject());
  LoginContext first = original.getSubject().getPrincipals(User.class).iterator().next().getLogin();
  LoginContext second = wrapped.getSubject().getPrincipals(User.class).iterator().next().getLogin();
  Assert.assertTrue(first == second);
}
IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* This test checks a race condition between getting and adding tokens for
* the current user. Calling UserGroupInformation.getCurrentUser() returns
* a new object each time, so simply making these methods synchronized was not
* enough to prevent race conditions and causing a
* ConcurrentModificationException. These methods are synchronized on the
* Subject, which is the same object between UserGroupInformation instances.
* This test tries to cause a CME, by exposing the race condition. Previously
* this test would fail every time; now it does not.
*/
@Test public void testTokenRaceCondition() throws Exception {
UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES);
userGroupInfo.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
assertNotEquals(UserGroupInformation.getLoginUser(),UserGroupInformation.getCurrentUser());
GetTokenThread thread=new GetTokenThread();
try {
thread.start();
for (int i=0; i < 100; i++) {
@SuppressWarnings("unchecked") Token extends TokenIdentifier> t=mock(Token.class);
when(t.getService()).thenReturn(new Text("t" + i));
UserGroupInformation.getCurrentUser().addToken(t);
assertNull("ConcurrentModificationException encountered",thread.cme);
}
}
catch ( ConcurrentModificationException cme) {
cme.printStackTrace();
fail("ConcurrentModificationException encountered");
}
finally {
thread.runThread=false;
thread.join(5 * 1000);
}
return null;
}
}
);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=30000) public void testGetRealAuthenticationMethod() {
  // For a plain user the real and effective auth methods agree; a proxy
  // user reports PROXY while the real user's method shows through.
  UserGroupInformation base = UserGroupInformation.createRemoteUser("user1");
  base.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
  assertEquals(AuthenticationMethod.SIMPLE, base.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.SIMPLE, base.getRealAuthenticationMethod());
  UserGroupInformation proxied = UserGroupInformation.createProxyUser("user2", base);
  assertEquals(AuthenticationMethod.PROXY, proxied.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.SIMPLE, proxied.getRealAuthenticationMethod());
}
BranchVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=30000) public void testTestAuthMethod() throws Exception {
  // Setting each RPC-level auth method must round-trip to the matching
  // AuthenticationMethod enum value.
  UserGroupInformation current = UserGroupInformation.getCurrentUser();
  for (AuthenticationMethod method : AuthenticationMethod.values()) {
    if (method.getAuthMethod() == null) {
      continue; // this method has no RPC-level equivalent
    }
    current.setAuthenticationMethod(method.getAuthMethod());
    assertEquals(method, current.getAuthenticationMethod());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=30000) public void testEquals() throws Exception {
  // UGI equality and hashCode follow the underlying Subject, not the user
  // name: same-name UGIs with distinct Subjects are unequal.
  UserGroupInformation first = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertEquals(first, first);
  UserGroupInformation sameName = UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
  assertFalse(first.equals(sameName));
  assertFalse(first.hashCode() == sameName.hashCode());
  UserGroupInformation sameSubject = new UserGroupInformation(first.getSubject());
  assertEquals(first, sameSubject);
  assertEquals(first.hashCode(), sameSubject.hashCode());
}
InternalCallVerifier EqualityVerifier
@Test public void testCommandHelpExitsNormally() throws Exception {
  // Each subcommand invoked with "-help" must initialize successfully (0).
  String[] commands = {"create", "list", "delete"};
  for (String cmd : commands) {
    CredentialShell helpShell = new CredentialShell();
    helpShell.setConf(new Configuration());
    assertEquals("Expected help argument on " + cmd + " to return 0", 0,
        helpShell.init(new String[]{cmd, "-help"}));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidProvider() throws Exception {
  // An unknown provider scheme ("sdff://...") must fail with rc 1 and the
  // "no valid CredentialProviders" message.
  CredentialShell shell = new CredentialShell();
  shell.setConf(new Configuration());
  String[] createArgs = {"create", "credential1", "-value", "p@ssw0rd",
      "-provider", "sdff://file/tmp/credstore.jceks"};
  int rc = shell.run(createArgs);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Without -value the shell prompts for the password twice; two matching
// entries create the credential, which is then deleted again as cleanup.
@Test public void testPromptForCredential() throws Exception {
String[] args1={"create","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
ArrayList passwords=new ArrayList();
passwords.add("p@ssw0rd");
passwords.add("p@ssw0rd");
int rc=0;
CredentialShell shell=new CredentialShell();
shell.setConf(new Configuration());
// MockPasswordReader feeds the two (matching) prompt answers.
shell.setPasswordReader(new MockPasswordReader(passwords));
rc=shell.run(args1);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " + "created."));
// Clean up: delete the credential we just created.
String[] args2={"delete","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
rc=shell.run(args2);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted."));
}
InternalCallVerifier EqualityVerifier
@Test public void testEmptyArgList() throws Exception {
  // With no arguments at all, init() must fail with status 1.
  CredentialShell emptyArgShell = new CredentialShell();
  emptyArgShell.setConf(new Configuration());
  String[] noArgs = new String[0];
  assertEquals(1, emptyArgShell.init(noArgs));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testTransientProviderOnlyConfig() throws Exception {
  // With only the transient "user:///" provider configured, "create"
  // must report that no valid providers exist and exit with 1.
  Configuration onlyTransient = new Configuration();
  onlyTransient.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
  CredentialShell shell = new CredentialShell();
  shell.setConf(onlyTransient);
  String[] createArgs = {"create", "credential1"};
  int rc = shell.run(createArgs);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Full lifecycle against a JKS provider: create, list (present),
// delete, list (absent). outContent is reset between phases so each
// assertion only sees its own phase's output.
@Test public void testCredentialSuccessfulLifecycle() throws Exception {
outContent.reset();
String[] args1={"create","credential1","-value","p@ssw0rd","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
int rc=0;
CredentialShell cs=new CredentialShell();
cs.setConf(new Configuration());
rc=cs.run(args1);
assertEquals(outContent.toString(),0,rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " + "created."));
// List must show the newly created credential.
outContent.reset();
String[] args2={"list","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
rc=cs.run(args2);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("credential1"));
// Delete it again.
outContent.reset();
String[] args4={"delete","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
rc=cs.run(args4);
assertEquals(0,rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted."));
// After deletion, list must no longer show it.
outContent.reset();
String[] args5={"list","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
rc=cs.run(args5);
assertEquals(0,rc);
assertFalse(outContent.toString(),outContent.toString().contains("credential1"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Operations against the transient "user:///" provider succeed but must
// emit a warning that the provider does not persist.
@Test public void testTransientProviderWarning() throws Exception {
String[] args1={"create","credential1","-value","p@ssw0rd","-provider","user:///"};
int rc=0;
CredentialShell cs=new CredentialShell();
cs.setConf(new Configuration());
rc=cs.run(args1);
assertEquals(outContent.toString(),0,rc);
assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider."));
// Delete also succeeds against the transient provider.
String[] args2={"delete","credential1","-provider","user:///"};
rc=cs.run(args2);
assertEquals(outContent.toString(),0,rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted."));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// If the two interactive password prompts do not match (first answer is
// null here), the create must fail with rc 1 and a mismatch message.
@Test public void testPromptForCredentialWithEmptyPasswd() throws Exception {
String[] args1={"create","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"};
ArrayList passwords=new ArrayList();
passwords.add(null);
passwords.add("p@ssw0rd");
int rc=0;
CredentialShell shell=new CredentialShell();
shell.setConf(new Configuration());
// MockPasswordReader feeds the mismatched prompt answers.
shell.setPasswordReader(new MockPasswordReader(passwords));
rc=shell.run(args1);
assertEquals(outContent.toString(),1,rc);
assertTrue(outContent.toString().contains("Passwords don't match"));
}
InternalCallVerifier EqualityVerifier
@Test public void testCredentialEntry() throws Exception {
  // CredentialEntry stores the alias and the credential material verbatim.
  char[] material = {1, 2, 3, 4};
  CredentialProvider.CredentialEntry entry = new CredentialProvider.CredentialEntry("cred1", material);
  assertEquals("cred1", entry.getAlias());
  assertArrayEquals(new char[]{1, 2, 3, 4}, entry.getCredential());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// The factory must build one provider per path entry, in order, and each
// provider's toString() must echo its configured URI.
@Test public void testFactory() throws Exception {
Configuration conf=new Configuration();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,UserProvider.SCHEME_NAME + ":///," + JavaKeyStoreProvider.SCHEME_NAME+ "://file"+ tmpDir+ "/test.jks");
List providers=CredentialProviderFactory.getProviders(conf);
assertEquals(2,providers.size());
assertEquals(UserProvider.class,providers.get(0).getClass());
assertEquals(JavaKeyStoreProvider.class,providers.get(1).getClass());
assertEquals(UserProvider.SCHEME_NAME + ":///",providers.get(0).toString());
assertEquals(JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks",providers.get(1).toString());
}
InternalCallVerifier BooleanVerifier
// The JKS provider must create its keystore file with owner-only
// permissions (rwx------) and retain permissions across updates.
@Test public void testJksProvider() throws Exception {
Configuration conf=new Configuration();
final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks";
// Start from a clean slate: remove any keystore left by earlier runs.
File file=new File(tmpDir,"test.jks");
file.delete();
conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,ourUrl);
checkSpecificProvider(conf,ourUrl);
Path path=ProviderUtils.unnestUri(new URI(ourUrl));
FileSystem fs=path.getFileSystem(conf);
FileStatus s=fs.getFileStatus(path);
// Keystore must be private to the owner.
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist",file.isFile());
// Widen permissions, then verify the provider preserves them on rewrite.
fs.setPermission(path,new FsPermission("777"));
checkPermissionRetention(conf,ourUrl,path);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test public void testExtractTokenFail() throws Exception {
HttpURLConnection conn=Mockito.mock(HttpURLConnection.class);
Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_UNAUTHORIZED);
String tokenStr="foo";
Map> headers=new HashMap>();
List cookies=new ArrayList();
cookies.add(AuthenticatedURL.AUTH_COOKIE + "=" + tokenStr);
headers.put("Set-Cookie",cookies);
Mockito.when(conn.getHeaderFields()).thenReturn(headers);
AuthenticatedURL.Token token=new AuthenticatedURL.Token();
token.set("bar");
try {
AuthenticatedURL.extractToken(conn,token);
Assert.fail();
}
catch ( AuthenticationException ex) {
Assert.assertFalse(token.isSet());
}
catch ( Exception ex) {
Assert.fail();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testToken() throws Exception {
  // A default-constructed token is unset; one built from a string is set
  // and round-trips through toString().
  AuthenticatedURL.Token tok = new AuthenticatedURL.Token();
  Assert.assertFalse(tok.isSet());
  tok = new AuthenticatedURL.Token("foo");
  Assert.assertTrue(tok.isSet());
  Assert.assertEquals("foo", tok.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testNotAuthenticated() throws Exception {
  // An unauthenticated request must get 401 plus a WWW-Authenticate header.
  AuthenticatorTestCase testCase = new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
  testCase.start();
  try {
    HttpURLConnection conn = (HttpURLConnection) new URL(testCase.getBaseURL()).openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
    Assert.assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null);
  } finally {
    testCase.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAnonymousDisallowed() throws Exception {
  // With anonymous access disabled, an unauthenticated request is
  // rejected with 403 and an explanatory message.
  AuthenticatorTestCase testCase = new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false));
  testCase.start();
  try {
    HttpURLConnection conn = (HttpURLConnection) new URL(testCase.getBaseURL()).openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, conn.getResponseCode());
    Assert.assertEquals("Anonymous requests are disallowed", conn.getResponseMessage());
  } finally {
    testCase.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAnonymousAllowed() throws Exception {
  // With anonymous access enabled, an unauthenticated request gets 200.
  AuthenticatorTestCase testCase = new AuthenticatorTestCase();
  AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true));
  testCase.start();
  try {
    HttpURLConnection conn = (HttpURLConnection) new URL(testCase.getBaseURL()).openConnection();
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  } finally {
    testCase.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// With a custom non-browser User-Agent list ("foo, bar"), an agent not on
// the list ("blah") is treated as a browser and goes through the
// alternate authentication path, yielding the fixed A/B token.
@Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception {
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
// Replace the handler built by the fixture with one using custom props.
if (handler != null) {
handler.destroy();
handler=null;
}
handler=getNewAuthenticationHandler();
Properties props=getDefaultProperties();
props.setProperty("alt-kerberos.non-browser.user-agents","foo, bar");
try {
handler.init(props);
}
catch ( Exception ex) {
// Null out so teardown does not destroy a half-initialized handler.
handler=null;
throw ex;
}
Mockito.when(request.getHeader("User-Agent")).thenReturn("blah");
AuthenticationToken token=handler.authenticate(request,response);
Assert.assertEquals("A",token.getUserName());
Assert.assertEquals("B",token.getName());
Assert.assertEquals(getExpectedType(),token.getType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=60000) public void testAlternateAuthenticationAsBrowser() throws Exception {
  // A browser-like User-Agent takes the alternate (browser)
  // authentication path, producing the fixed A/B token.
  HttpServletRequest req = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse resp = Mockito.mock(HttpServletResponse.class);
  Mockito.when(req.getHeader("User-Agent")).thenReturn("Some Browser");
  AuthenticationToken tok = handler.authenticate(req, resp);
  Assert.assertEquals("A", tok.getUserName());
  Assert.assertEquals("B", tok.getName());
  Assert.assertEquals(getExpectedType(), tok.getType());
}
InternalCallVerifier EqualityVerifier
// getConfiguration() must strip the configured prefix from init-parameter
// names, both for the empty prefix and for a "foo." prefix.
@Test public void testGetConfiguration() throws Exception {
AuthenticationFilter filter=new AuthenticationFilter();
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("");
Mockito.when(config.getInitParameter("a")).thenReturn("A");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements());
Properties props=filter.getConfiguration("",config);
Assert.assertEquals("A",props.getProperty("a"));
// With prefix "foo.", "foo.a" must surface as "a".
config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo");
Mockito.when(config.getInitParameter("foo.a")).thenReturn("A");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements());
props=filter.getConfiguration("foo.",config);
Assert.assertEquals("A",props.getProperty("a"));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises AuthenticationFilter.init() across six configurations: random
 * signer secret, explicit string secret, an externally supplied
 * SignerSecretProvider, cookie domain/path, a custom handler class, and the
 * kerberos handler (whose init fails here, but the handler must still be set).
 */
@Test public void testInit() throws Exception {
AuthenticationFilter filter=new AuthenticationFilter();
// Case 1: "simple" auth, no secret configured -> a random secret is used.
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn((new Long(TOKEN_VALIDITY_SEC)).toString());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
// No provider in the servlet context -> the filter builds its own.
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertEquals(PseudoAuthenticationHandler.class,filter.getAuthenticationHandler().getClass());
Assert.assertTrue(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
Assert.assertNull(filter.getCookieDomain());
Assert.assertNull(filter.getCookiePath());
Assert.assertEquals(TOKEN_VALIDITY_SEC,filter.getValidity());
}
finally {
filter.destroy();
}
// Case 2: explicit string secret -> not random, still not a custom provider.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertFalse(filter.isCustomSignerSecretProvider());
}
finally {
filter.destroy();
}
// Case 3: a SignerSecretProvider supplied via the servlet context attribute
// -> the filter must report a custom (non-random) provider.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
// Minimal stub provider; only its presence matters for this assertion.
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(new SignerSecretProvider(){
@Override public void init( Properties config, long tokenValidity){
}
@Override public byte[] getCurrentSecret(){
return null;
}
@Override public byte[][] getAllSecrets(){
return null;
}
}
);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertFalse(filter.isRandomSecret());
Assert.assertTrue(filter.isCustomSignerSecretProvider());
}
finally {
filter.destroy();
}
// Case 4: cookie domain and path init parameters are exposed via getters.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.COOKIE_DOMAIN,AuthenticationFilter.COOKIE_PATH)).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertEquals(".foo.com",filter.getCookieDomain());
Assert.assertEquals("/bar",filter.getCookiePath());
}
finally {
filter.destroy();
}
// Case 5: a custom handler class name -> the handler's init/destroy
// lifecycle hooks must both be invoked.
DummyAuthenticationHandler.reset();
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
Assert.assertTrue(DummyAuthenticationHandler.init);
}
finally {
filter.destroy();
Assert.assertTrue(DummyAuthenticationHandler.destroy);
}
// Case 6: kerberos without keytab config -> init throws ServletException,
// but the kerberos handler must still have been selected.
filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
filter.init(config);
}
catch ( ServletException ex) {
// expected: kerberos handler cannot fully initialize in this setup
}
finally {
Assert.assertEquals(KerberosAuthenticationHandler.class,filter.getAuthenticationHandler().getClass());
filter.destroy();
}
}
InternalCallVerifier EqualityVerifier
/**
 * A cookie signed with the filter's configured secret must be accepted by
 * getToken() and round-trip to an equivalent token.
 */
@Test public void testGetToken() throws Exception {
  AuthenticationFilter filter=new AuthenticationFilter();
  try {
    FilterConfig cfg=Mockito.mock(FilterConfig.class);
    Mockito.when(cfg.getInitParameter("management.operation.return")).thenReturn("true");
    Mockito.when(cfg.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
    Mockito.when(cfg.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
    Mockito.when(cfg.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements());
    ServletContext ctx=Mockito.mock(ServletContext.class);
    Mockito.when(ctx.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
    Mockito.when(cfg.getServletContext()).thenReturn(ctx);
    filter.init(cfg);
    // Build an unexpired token and sign it with the same secret the filter uses.
    AuthenticationToken original=new AuthenticationToken("u","p",DummyAuthenticationHandler.TYPE);
    original.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
    String signed=new Signer(new StringSignerSecretProvider("secret")).sign(original.toString());
    HttpServletRequest req=Mockito.mock(HttpServletRequest.class);
    Mockito.when(req.getCookies()).thenReturn(new Cookie[]{new Cookie(AuthenticatedURL.AUTH_COOKIE,signed)});
    Assert.assertEquals(original.toString(),filter.getToken(req).toString());
  }
  finally {
    filter.destroy();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * The auth-type value must be matched case-insensitively: "SimPle" still
 * selects the pseudo authentication handler.
 */
@Test public void testInitCaseSensitivity() throws Exception {
  AuthenticationFilter filter=new AuthenticationFilter();
  try {
    FilterConfig cfg=Mockito.mock(FilterConfig.class);
    Mockito.when(cfg.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("SimPle");
    Mockito.when(cfg.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(Long.toString(TOKEN_VALIDITY_SEC));
    Mockito.when(cfg.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
    ServletContext ctx=Mockito.mock(ServletContext.class);
    Mockito.when(ctx.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
    Mockito.when(cfg.getServletContext()).thenReturn(ctx);
    filter.init(cfg);
    Assert.assertEquals(PseudoAuthenticationHandler.class,filter.getAuthenticationHandler().getClass());
  }
  finally {
    filter.destroy();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * getRequestURL() must append the query string to the request URL.
 */
@Test public void testGetRequestURL() throws Exception {
  AuthenticationFilter filter=new AuthenticationFilter();
  try {
    FilterConfig cfg=Mockito.mock(FilterConfig.class);
    Mockito.when(cfg.getInitParameter("management.operation.return")).thenReturn("true");
    Mockito.when(cfg.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
    Mockito.when(cfg.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements());
    ServletContext ctx=Mockito.mock(ServletContext.class);
    Mockito.when(ctx.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
    Mockito.when(cfg.getServletContext()).thenReturn(ctx);
    filter.init(cfg);
    HttpServletRequest req=Mockito.mock(HttpServletRequest.class);
    Mockito.when(req.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
    Mockito.when(req.getQueryString()).thenReturn("a=A&b=B");
    Assert.assertEquals("http://foo:8080/bar?a=A&b=B",filter.getRequestURL(req));
  }
  finally {
    filter.destroy();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * doFilter() with a signed auth cookie: the downstream chain should observe
 * the authenticated remote user "u" and principal "p" from the token.
 * NOTE(review): the cookie is signed with the literal secret "secret" while
 * the filter is initialized without a SIGNATURE_SECRET parameter; confirm the
 * filter verifies this cookie (and that the chain Answer actually runs)
 * rather than silently falling back to re-authentication.
 */
@Test public void testDoFilterAuthenticated() throws Exception {
AuthenticationFilter filter=new AuthenticationFilter();
try {
FilterConfig config=Mockito.mock(FilterConfig.class);
Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true");
Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName());
Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements());
ServletContext context=Mockito.mock(ServletContext.class);
Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
Mockito.when(config.getServletContext()).thenReturn(context);
filter.init(config);
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar"));
// Build a valid, unexpired token and attach it as a signed cookie.
AuthenticationToken token=new AuthenticationToken("u","p","t");
token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
Signer signer=new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned=signer.sign(token.toString());
Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned);
Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
HttpServletResponse response=Mockito.mock(HttpServletResponse.class);
FilterChain chain=Mockito.mock(FilterChain.class);
// The assertions live inside the chain: the request passed downstream must
// carry the identity taken from the token.
Mockito.doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocation) throws Throwable {
Object[] args=invocation.getArguments();
HttpServletRequest request=(HttpServletRequest)args[0];
Assert.assertEquals("u",request.getRemoteUser());
Assert.assertEquals("p",request.getUserPrincipal().getName());
return null;
}
}
).when(chain).doFilter(Mockito.anyObject(),Mockito.anyObject());
filter.doFilter(request,response,chain);
}
finally {
filter.destroy();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * All token getters must reflect the constructor/setter values, and the
 * token must flip to expired once its expiry time passes.
 */
@Test public void testGetters() throws Exception {
  long expiry=System.currentTimeMillis() + 50;
  AuthenticationToken tok=new AuthenticationToken("u","p","t");
  tok.setExpires(expiry);
  Assert.assertEquals("u",tok.getUserName());
  Assert.assertEquals("p",tok.getName());
  Assert.assertEquals("t",tok.getType());
  Assert.assertEquals(expiry,tok.getExpires());
  Assert.assertFalse(tok.isExpired());
  // Outlive the 50ms validity window.
  Thread.sleep(70);
  Assert.assertTrue(tok.isExpired());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A token serialized with toString() must parse back to an equivalent,
 * unexpired token that later expires on schedule.
 */
@Test public void testToStringAndParse() throws Exception {
  long expiry=System.currentTimeMillis() + 50;
  AuthenticationToken source=new AuthenticationToken("u","p","t");
  source.setExpires(expiry);
  AuthenticationToken parsed=AuthenticationToken.parse(source.toString());
  Assert.assertEquals("p",parsed.getName());
  Assert.assertEquals("t",parsed.getType());
  Assert.assertEquals(expiry,parsed.getExpires());
  Assert.assertFalse(parsed.isExpired());
  // Outlive the 50ms validity window.
  Thread.sleep(70);
  Assert.assertTrue(parsed.isExpired());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With principal "*", the kerberos handler must log in every HTTP/<host>
 * principal found in the keytab and skip non-HTTP ones.
 */
@Test(timeout=60000) public void testDynamicPrincipalDiscovery() throws Exception {
String[] keytabUsers=new String[]{"HTTP/host1","HTTP/host2","HTTP2/host1","XHTTP/host"};
String keytab=KerberosTestUtils.getKeytabFile();
getKdc().createPrincipal(new File(keytab),keytabUsers);
// Re-initialize the shared handler with wildcard principal discovery.
handler.destroy();
Properties props=new Properties();
props.setProperty(KerberosAuthenticationHandler.KEYTAB,keytab);
props.setProperty(KerberosAuthenticationHandler.PRINCIPAL,"*");
handler=getNewAuthenticationHandler();
handler.init(props);
Assert.assertEquals(KerberosTestUtils.getKeytabFile(),handler.getKeytab());
Set loginPrincipals=handler.getPrincipals();
for ( String user : keytabUsers) {
Principal principal=new KerberosPrincipal(user + "@" + KerberosTestUtils.getRealm());
// Only "HTTP/" service principals qualify; "HTTP2/" and "XHTTP/" do not
// match the prefix and must be absent.
boolean expected=user.startsWith("HTTP/");
Assert.assertEquals("checking for " + user,expected,loginPrincipals.contains(principal));
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * After setup, the handler must expose the configured keytab and exactly
 * one principal: the server principal.
 */
@Test(timeout=60000) public void testInit() throws Exception {
  Assert.assertEquals(KerberosTestUtils.getKeytabFile(),handler.getKeytab());
  Set configured=handler.getPrincipals();
  Assert.assertEquals(1,configured.size());
  Assert.assertTrue(configured.contains(new KerberosPrincipal(KerberosTestUtils.getServerPrincipal())));
}
InternalCallVerifier EqualityVerifier
/**
 * With anonymous access disabled via configuration, the handler must report
 * that anonymous users are not accepted.
 */
@Test public void testInit() throws Exception {
  PseudoAuthenticationHandler handler=new PseudoAuthenticationHandler();
  try {
    Properties props=new Properties();
    props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED,"false");
    handler.init(props);
    // assertFalse gives a clearer failure message than
    // assertEquals(false, ...) and avoids boxing the primitive boolean.
    Assert.assertFalse(handler.getAcceptAnonymous());
  }
  finally {
    handler.destroy();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * With anonymous access allowed, an unauthenticated request must yield the
 * ANONYMOUS token.
 */
@Test public void testAnonymousOn() throws Exception {
  PseudoAuthenticationHandler pseudoHandler=new PseudoAuthenticationHandler();
  try {
    Properties settings=new Properties();
    settings.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED,"true");
    pseudoHandler.init(settings);
    HttpServletRequest req=Mockito.mock(HttpServletRequest.class);
    HttpServletResponse resp=Mockito.mock(HttpServletResponse.class);
    Assert.assertEquals(AuthenticationToken.ANONYMOUS,pseudoHandler.authenticate(req,resp));
  }
  finally {
    pseudoHandler.destroy();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A RandomSignerSecretProvider seeded with a fixed value must produce the
 * same secret sequence as a same-seeded Random, and roll secrets each
 * rollover interval while retaining exactly the previous one.
 */
@Test public void testGetAndRollSecrets() throws Exception {
long rolloverFrequency=15 * 1000;
// Same seed as the provider, so this Random predicts its secret sequence.
long seed=System.currentTimeMillis();
Random rand=new Random(seed);
byte[] secret1=Long.toString(rand.nextLong()).getBytes();
byte[] secret2=Long.toString(rand.nextLong()).getBytes();
byte[] secret3=Long.toString(rand.nextLong()).getBytes();
RandomSignerSecretProvider secretProvider=new RandomSignerSecretProvider(seed);
try {
secretProvider.init(null,rolloverFrequency);
byte[] currentSecret=secretProvider.getCurrentSecret();
byte[][] allSecrets=secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret1,currentSecret);
Assert.assertEquals(2,allSecrets.length);
Assert.assertArrayEquals(secret1,allSecrets[0]);
// No previous secret exists before the first rollover.
Assert.assertNull(allSecrets[1]);
// Sleep past the rollover (plus slack) so the next secret takes effect.
Thread.sleep(rolloverFrequency + 2000);
currentSecret=secretProvider.getCurrentSecret();
allSecrets=secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2,currentSecret);
Assert.assertEquals(2,allSecrets.length);
Assert.assertArrayEquals(secret2,allSecrets[0]);
Assert.assertArrayEquals(secret1,allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
currentSecret=secretProvider.getCurrentSecret();
allSecrets=secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret3,currentSecret);
Assert.assertEquals(2,allSecrets.length);
Assert.assertArrayEquals(secret3,allSecrets[0]);
Assert.assertArrayEquals(secret2,allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
}
finally {
secretProvider.destroy();
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Same rollover contract as the random-provider test, but with a
 * deterministic TRolloverSignerSecretProvider fed a fixed secret sequence.
 */
@Test public void testGetAndRollSecrets() throws Exception {
long rolloverFrequency=15 * 1000;
byte[] secret1="doctor".getBytes();
byte[] secret2="who".getBytes();
byte[] secret3="tardis".getBytes();
TRolloverSignerSecretProvider secretProvider=new TRolloverSignerSecretProvider(new byte[][]{secret1,secret2,secret3});
try {
secretProvider.init(null,rolloverFrequency);
byte[] currentSecret=secretProvider.getCurrentSecret();
byte[][] allSecrets=secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret1,currentSecret);
Assert.assertEquals(2,allSecrets.length);
Assert.assertArrayEquals(secret1,allSecrets[0]);
// No previous secret exists before the first rollover.
Assert.assertNull(allSecrets[1]);
// Sleep past each rollover (plus slack) before re-checking.
Thread.sleep(rolloverFrequency + 2000);
currentSecret=secretProvider.getCurrentSecret();
allSecrets=secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret2,currentSecret);
Assert.assertEquals(2,allSecrets.length);
Assert.assertArrayEquals(secret2,allSecrets[0]);
Assert.assertArrayEquals(secret1,allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
currentSecret=secretProvider.getCurrentSecret();
allSecrets=secretProvider.getAllSecrets();
Assert.assertArrayEquals(secret3,currentSecret);
Assert.assertEquals(2,allSecrets.length);
Assert.assertArrayEquals(secret3,allSecrets[0]);
Assert.assertArrayEquals(secret2,allSecrets[1]);
Thread.sleep(rolloverFrequency + 2000);
}
finally {
secretProvider.destroy();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Signing must be deterministic for identical input and input-sensitive for
 * different input.
 */
@Test public void testSignature() throws Exception {
  Signer signer=new Signer(new StringSignerSecretProvider("secret"));
  String first=signer.sign("ok");
  Assert.assertEquals(first,signer.sign("ok"));
  Assert.assertNotEquals(first,signer.sign("wrong"));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Signatures must verify against either the current or the previous secret,
 * and fail once the signing secret has rotated out entirely.
 */
@Test public void testMultipleSecrets() throws Exception {
TestSignerSecretProvider secretProvider=new TestSignerSecretProvider();
Signer signer=new Signer(secretProvider);
// Sign with secretB as the only (current) secret.
secretProvider.setCurrentSecret("secretB");
String t1="test";
String s1=signer.sign(t1);
String e1=signer.verifyAndExtract(s1);
Assert.assertEquals(t1,e1);
// Adding an older secret must not change how new content is signed.
secretProvider.setPreviousSecret("secretA");
String t2="test";
String s2=signer.sign(t2);
String e2=signer.verifyAndExtract(s2);
Assert.assertEquals(t2,e2);
Assert.assertEquals(s1,s2);
// Rotate: secretC current, secretB previous. The old signature (made with
// secretB) must still verify via the previous secret.
secretProvider.setCurrentSecret("secretC");
secretProvider.setPreviousSecret("secretB");
String t3="test";
String s3=signer.sign(t3);
String e3=signer.verifyAndExtract(s3);
Assert.assertEquals(t3,e3);
Assert.assertNotEquals(s1,s3);
String e1b=signer.verifyAndExtract(s1);
Assert.assertEquals(t1,e1b);
// Rotate again: secretB is gone, so the secretB signature must be rejected.
secretProvider.setCurrentSecret("secretD");
secretProvider.setPreviousSecret("secretC");
try {
signer.verifyAndExtract(s1);
Assert.fail();
}
catch ( SignerException ex) {
// expected: signature no longer verifiable with any known secret
}
}
InternalCallVerifier EqualityVerifier
/**
 * verifyAndExtract() must accept a freshly signed string and return the
 * original payload.
 */
@Test public void testVerify() throws Exception {
  Signer roundTripper=new Signer(new StringSignerSecretProvider("secret"));
  String payload="test";
  String signed=roundTripper.sign(payload);
  Assert.assertEquals(payload,roundTripper.verifyAndExtract(signed));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A StringSignerSecretProvider exposes exactly one secret: the UTF bytes of
 * the configured string, both as current secret and as the full list.
 */
@Test public void testGetSecrets() throws Exception {
  String secretStr="secret";
  StringSignerSecretProvider provider=new StringSignerSecretProvider(secretStr);
  provider.init(null,-1);
  byte[] expected=secretStr.getBytes();
  Assert.assertArrayEquals(expected,provider.getCurrentSecret());
  byte[][] all=provider.getAllSecrets();
  Assert.assertEquals(1,all.length);
  Assert.assertArrayEquals(expected,all[0]);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Parses "users groups" ACL strings (single entries, missing parts, extra
 * whitespace, comma-separated lists) and verifies the resulting user and
 * group collections.
 *
 * Assertions use JUnit's (expected, actual) argument order — the original
 * had them reversed, which produces misleading failure messages.
 */
@Test public void testAccessControlList() throws Exception {
  AccessControlList acl;
  Collection users;
  Collection groups;
  // One user and one group.
  acl=new AccessControlList("drwho tardis");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertEquals("tardis",groups.iterator().next());
  // User only: group list must be empty.
  acl=new AccessControlList("drwho");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  groups=acl.getGroups();
  assertEquals(0,groups.size());
  // Trailing space after the user: still no groups.
  acl=new AccessControlList("drwho ");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  groups=acl.getGroups();
  assertEquals(0,groups.size());
  // Leading space: group only, no users.
  acl=new AccessControlList(" tardis");
  users=acl.getUsers();
  assertEquals(0,users.size());
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertEquals("tardis",groups.iterator().next());
  // Comma-separated users and groups, with surrounding whitespace trimmed.
  Iterator iter;
  acl=new AccessControlList("drwho,joe tardis, users");
  users=acl.getUsers();
  assertEquals(2,users.size());
  iter=users.iterator();
  assertEquals("drwho",iter.next());
  assertEquals("joe",iter.next());
  groups=acl.getGroups();
  assertEquals(2,groups.size());
  iter=groups.iterator();
  assertEquals("tardis",iter.next());
  assertEquals("users",iter.next());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the addUser/addGroup and removeUser/removeGroup API, checking
 * both the collections and the serialized ACL string after each mutation.
 *
 * Assertions use JUnit's (expected, actual) argument order — several in the
 * original were reversed, which produces misleading failure messages.
 */
@Test public void testAddRemoveAPI(){
  AccessControlList acl;
  Collection users;
  Collection groups;
  // An all-whitespace spec yields an empty ACL.
  acl=new AccessControlList(" ");
  assertEquals(0,acl.getUsers().size());
  assertEquals(0,acl.getGroups().size());
  assertEquals(" ",acl.getAclString());
  acl.addUser("drwho");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertEquals("drwho",users.iterator().next());
  assertEquals("drwho ",acl.getAclString());
  acl.addGroup("tardis");
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertEquals("tardis",groups.iterator().next());
  assertEquals("drwho tardis",acl.getAclString());
  acl.addUser("joe");
  acl.addGroup("users");
  users=acl.getUsers();
  assertEquals(2,users.size());
  Iterator iter=users.iterator();
  assertEquals("drwho",iter.next());
  assertEquals("joe",iter.next());
  groups=acl.getGroups();
  assertEquals(2,groups.size());
  iter=groups.iterator();
  assertEquals("tardis",iter.next());
  assertEquals("users",iter.next());
  assertEquals("drwho,joe tardis,users",acl.getAclString());
  acl.removeUser("joe");
  acl.removeGroup("users");
  users=acl.getUsers();
  assertEquals(1,users.size());
  assertFalse(users.contains("joe"));
  groups=acl.getGroups();
  assertEquals(1,groups.size());
  assertFalse(groups.contains("users"));
  assertEquals("drwho tardis",acl.getAclString());
  acl.removeGroup("tardis");
  groups=acl.getGroups();
  assertEquals(0,groups.size());
  assertFalse(groups.contains("tardis"));
  assertEquals("drwho ",acl.getAclString());
  acl.removeUser("drwho");
  // NOTE: these checks on the previously fetched "users" collection rely on
  // getUsers() returning a live view of the ACL's internal collection.
  assertEquals(0,users.size());
  assertFalse(users.contains("drwho"));
  assertEquals(0,acl.getGroups().size());
  assertEquals(0,acl.getUsers().size());
  assertEquals(" ",acl.getAclString());
}
InternalCallVerifier BooleanVerifier
/**
 * Mutating a wildcard ("*") ACL must be a no-op: it stays all-allowed and
 * names added to it never appear in the serialized ACL string.
 */
@Test public void testAddRemoveToWildCardACL(){
  AccessControlList acl=new AccessControlList(" * ");
  assertTrue(acl.isAllAllowed());
  UserGroupInformation alienUser=UserGroupInformation.createUserForTesting("drwho@EXAMPLE.COM",new String[]{"aliens"});
  UserGroupInformation tardisUser=UserGroupInformation.createUserForTesting("drwho2@EXAMPLE.COM",new String[]{"tardis"});
  acl.addUser("drwho");
  assertTrue(acl.isAllAllowed());
  assertFalse(acl.getAclString().contains("drwho"));
  acl.addGroup("tardis");
  assertTrue(acl.isAllAllowed());
  assertFalse(acl.getAclString().contains("tardis"));
  // Removing entries from a wildcard ACL must also leave it all-allowed.
  acl.removeUser("drwho");
  assertTrue(acl.isAllAllowed());
  assertUserAllowed(alienUser,acl);
  acl.removeGroup("tardis");
  assertTrue(acl.isAllAllowed());
  assertUserAllowed(tardisUser,acl);
}
InternalCallVerifier EqualityVerifier
/**
 * Protocols without an explicit ACL fall back to the wildcard ACL, and to
 * the configured default ACL once one is set.
 */
@Test public void testDefaultAcl(){
  ServiceAuthorizationManager manager=new ServiceAuthorizationManager();
  Configuration conf=new Configuration();
  conf.set(ACL_CONFIG,"user1 group1");
  manager.refresh(conf,new TestPolicyProvider());
  // The explicitly configured protocol uses its own ACL; the other gets "*".
  assertEquals("user1 group1",manager.getProtocolsAcls(TestProtocol.class).getAclString());
  assertEquals(AccessControlList.WILDCARD_ACL_VALUE,manager.getProtocolsAcls(TestProtocol1.class).getAclString());
  // After setting a default ACL, the unconfigured protocol picks it up.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,"user2 group2");
  manager.refresh(conf,new TestPolicyProvider());
  assertEquals("user1 group1",manager.getProtocolsAcls(TestProtocol.class).getAclString());
  assertEquals("user2 group2",manager.getProtocolsAcls(TestProtocol1.class).getAclString());
}
InternalCallVerifier EqualityVerifier
/**
 * Deleting the truststore file out from under the trust manager must not
 * drop the already-loaded certificates.
 */
@Test public void testReloadMissingTrustStore() throws Exception {
KeyPair kp=generateKeyPair("RSA");
cert1=generateCertificate("CN=Cert1",kp,30,"SHA1withRSA");
cert2=generateCertificate("CN=Cert2",kp,30,"SHA1withRSA");
String truststoreLocation=BASEDIR + "/testmissing.jks";
createTrustStore(truststoreLocation,"password","cert1",cert1);
ReloadingX509TrustManager tm=new ReloadingX509TrustManager("jks",truststoreLocation,"password",10);
try {
tm.init();
assertEquals(1,tm.getAcceptedIssuers().length);
X509Certificate cert=tm.getAcceptedIssuers()[0];
// Remove the backing file; the reload thread should keep the old state.
// NOTE(review): delete()'s boolean result is ignored — consider asserting it.
new File(truststoreLocation).delete();
// Wait one reload interval (plus slack) for the reloader to notice.
Thread.sleep((tm.getReloadInterval() + 200));
assertEquals(1,tm.getAcceptedIssuers().length);
assertEquals(cert,tm.getAcceptedIssuers()[0]);
}
finally {
tm.destroy();
}
}
InternalCallVerifier EqualityVerifier
/**
 * After the truststore gains a second certificate and the reload interval
 * elapses, the trust manager must expose both certificates.
 */
@Test public void testReload() throws Exception {
KeyPair kp=generateKeyPair("RSA");
cert1=generateCertificate("CN=Cert1",kp,30,"SHA1withRSA");
cert2=generateCertificate("CN=Cert2",kp,30,"SHA1withRSA");
String truststoreLocation=BASEDIR + "/testreload.jks";
createTrustStore(truststoreLocation,"password","cert1",cert1);
ReloadingX509TrustManager tm=new ReloadingX509TrustManager("jks",truststoreLocation,"password",10);
try {
tm.init();
assertEquals(1,tm.getAcceptedIssuers().length);
// Wait so the rewritten store gets a strictly newer modification time.
Thread.sleep((tm.getReloadInterval() + 1000));
Map certs=new HashMap();
certs.put("cert1",cert1);
certs.put("cert2",cert2);
createTrustStore(truststoreLocation,"password",certs);
assertEquals(10,tm.getReloadInterval());
// Give the reloader one interval (plus slack) to pick up the change.
Thread.sleep((tm.getReloadInterval() + 200));
assertEquals(2,tm.getAcceptedIssuers().length);
}
finally {
tm.destroy();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Overwriting the truststore with garbage must not disturb the trust
 * manager: the previously loaded certificate stays in effect after the
 * reload interval elapses.
 */
@Test public void testReloadCorruptTrustStore() throws Exception {
  KeyPair kp=generateKeyPair("RSA");
  cert1=generateCertificate("CN=Cert1",kp,30,"SHA1withRSA");
  cert2=generateCertificate("CN=Cert2",kp,30,"SHA1withRSA");
  String truststoreLocation=BASEDIR + "/testcorrupt.jks";
  createTrustStore(truststoreLocation,"password","cert1",cert1);
  ReloadingX509TrustManager tm=new ReloadingX509TrustManager("jks",truststoreLocation,"password",10);
  try {
    tm.init();
    assertEquals(1,tm.getAcceptedIssuers().length);
    X509Certificate cert=tm.getAcceptedIssuers()[0];
    // try-with-resources closes the stream even if write() throws, so the
    // file handle cannot leak into later tests (original leaked on failure).
    try (OutputStream os=new FileOutputStream(truststoreLocation)) {
      os.write(1);
    }
    // Backdate the mtime so the corrupt file is seen as a (stale) change.
    new File(truststoreLocation).setLastModified(System.currentTimeMillis() - 1000);
    Thread.sleep((tm.getReloadInterval() + 200));
    // The corrupt store must have been ignored; the original cert remains.
    assertEquals(1,tm.getAcceptedIssuers().length);
    assertEquals(cert,tm.getAcceptedIssuers()[0]);
  }
  finally {
    tm.destroy();
  }
}
InternalCallVerifier NullVerifier ExceptionVerifier HybridVerifier
/**
 * In CLIENT mode the factory can create client-side artifacts, but asking
 * for a server socket factory must fail.
 * @throws IllegalStateException expected from createSSLServerSocketFactory()
 */
@Test(expected=IllegalStateException.class) public void clientMode() throws Exception {
Configuration conf=createConfiguration(false,true);
SSLFactory sslFactory=new SSLFactory(SSLFactory.Mode.CLIENT,conf);
try {
sslFactory.init();
Assert.assertNotNull(sslFactory.createSSLSocketFactory());
Assert.assertNotNull(sslFactory.getHostnameVerifier());
// This call is expected to throw; the assertions above must run first.
sslFactory.createSSLServerSocketFactory();
}
finally {
sslFactory.destroy();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Every supported hostname-verifier name must round-trip through the
 * configuration, and an unset key must fall back to DEFAULT.
 *
 * The original repeated the set/init/assert/destroy stanza four times;
 * the data-driven loop keeps the exact same sequence of operations.
 */
@Test public void validHostnameVerifier() throws Exception {
  Configuration conf=createConfiguration(false,true);
  // No explicit verifier configured -> the factory falls back to DEFAULT.
  conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
  SSLFactory sslFactory=new SSLFactory(SSLFactory.Mode.CLIENT,conf);
  sslFactory.init();
  Assert.assertEquals("DEFAULT",sslFactory.getHostnameVerifier().toString());
  sslFactory.destroy();
  // Each named verifier must be selected verbatim.
  for (String verifier : new String[]{"ALLOW_ALL","DEFAULT_AND_LOCALHOST","STRICT","STRICT_IE6"}) {
    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY,verifier);
    sslFactory=new SSLFactory(SSLFactory.Mode.CLIENT,conf);
    sslFactory.init();
    Assert.assertEquals(verifier,sslFactory.getHostnameVerifier().toString());
    sslFactory.destroy();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Spawns 100 daemon threads that each create 100 delegation tokens, then
 * verifies every cached token: its info and master key exist, the stored
 * password matches a freshly derived one, and the token verifies.
 */
@Test public void testParallelDelegationTokenCreation() throws Exception {
  final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(2000,24 * 60 * 60* 1000,7 * 24 * 60* 60* 1000,2000);
  try {
    dtSecretManager.startThreads();
    int numThreads=100;
    final int numTokensPerThread=100;
    class tokenIssuerThread implements Runnable {
      @Override public void run(){
        for (int i=0; i < numTokensPerThread; i++) {
          generateDelegationToken(dtSecretManager,"auser","arenewer");
          try {
            Thread.sleep(250);
          }
          catch ( Exception e) {
            // best-effort pacing between token creations; interruption is fine
          }
        }
      }
    }
    Thread[] issuers=new Thread[numThreads];
    for (int i=0; i < numThreads; i++) {
      issuers[i]=new Daemon(new tokenIssuerThread());
      issuers[i].start();
    }
    for (int i=0; i < numThreads; i++) {
      issuers[i].join();
    }
    Map tokenCache=dtSecretManager.getAllTokens();
    Assert.assertEquals(numTokensPerThread * numThreads,tokenCache.size());
    Iterator iter=tokenCache.keySet().iterator();
    while (iter.hasNext()) {
      // The cache is a raw Map, so iterator()/get() return Object; the
      // explicit casts are required for this to compile.
      TestDelegationTokenIdentifier id=(TestDelegationTokenIdentifier)iter.next();
      DelegationTokenInformation info=(DelegationTokenInformation)tokenCache.get(id);
      Assert.assertTrue(info != null);
      DelegationKey key=dtSecretManager.getKey(id);
      Assert.assertTrue(key != null);
      byte[] storedPassword=dtSecretManager.retrievePassword(id);
      byte[] password=dtSecretManager.createPassword(id,key);
      Assert.assertTrue(Arrays.equals(password,storedPassword));
      dtSecretManager.verifyToken(id,password);
    }
  }
  finally {
    dtSecretManager.stopThreads();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Writes a fully populated identifier and reads it back, checking that
 * every field survives the round trip.
 */
@Test public void testSerialization() throws Exception {
  TestDelegationTokenIdentifier written=new TestDelegationTokenIdentifier(new Text("alice"),new Text("bob"),new Text("colin"));
  written.setIssueDate(123);
  written.setMasterKeyId(321);
  written.setMaxDate(314);
  written.setSequenceNumber(12345);
  DataOutputBuffer outBuf=new DataOutputBuffer();
  written.write(outBuf);
  DataInputBuffer inBuf=new DataInputBuffer();
  inBuf.reset(outBuf.getData(),0,outBuf.getLength());
  TestDelegationTokenIdentifier read=new TestDelegationTokenIdentifier();
  read.readFields(inBuf);
  assertEquals("alice",read.getUser().getUserName());
  assertEquals(new Text("bob"),read.getRenewer());
  assertEquals("colin",read.getUser().getRealUser().getUserName());
  assertEquals(123,read.getIssueDate());
  assertEquals(321,read.getMasterKeyId());
  assertEquals(314,read.getMaxDate());
  assertEquals(12345,read.getSequenceNumber());
  assertEquals(written,read);
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies token cancellation: a non-renewer cannot renew, the designated
 * renewer can cancel (and the stored token is removed), and a cancelled
 * token can no longer be renewed.
 */
@Test public void testCancelDelegationToken() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,10 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
final Token token=generateDelegationToken(dtSecretManager,"SomeUser","JobTracker");
// A caller that is not the token's renewer must be rejected.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"FakeCanceller");
return null;
}
}
,AccessControlException.class);
// The designated renewer cancels the token; the secret manager must
// have been asked to remove the stored token.
dtSecretManager.cancelToken(token,"JobTracker");
Assert.assertTrue(dtSecretManager.isRemoveStoredTokenCalled);
// After cancellation even the legitimate renewer gets InvalidToken.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"JobTracker");
return null;
}
}
,InvalidToken.class);
}
finally {
dtSecretManager.stopThreads();
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * When owner and real user are the same principal, getUser() yields a
 * simple (non-proxy) TOKEN-authenticated UGI.
 */
@Test
public void testGetUserWithOwnerEqualsReal() {
  Text principal = new Text("owner");
  UserGroupInformation ugi =
      new TestDelegationTokenIdentifier(principal, null, principal).getUser();
  assertNull(ugi.getRealUser());
  assertEquals("owner", ugi.getUserName());
  assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * End-to-end secret-manager lifecycle: token issue is persisted, only the
 * designated renewer may renew, renewal is persisted, the password is
 * retrievable until expiry, renewal revives an expired token, and past
 * the max lifetime renewal fails for good.
 */
@Test public void testDelegationTokenSecretManager() throws Exception {
final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,3 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
final Token token=generateDelegationToken(dtSecretManager,"SomeUser","JobTracker");
// Creating the token must have persisted it.
Assert.assertTrue(dtSecretManager.isStoreNewTokenCalled);
// Only the designated renewer ("JobTracker") may renew.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"FakeRenewer");
return null;
}
}
,AccessControlException.class);
long time=dtSecretManager.renewToken(token,"JobTracker");
// Renewal must have updated the persisted token.
Assert.assertTrue(dtSecretManager.isUpdateStoredTokenCalled);
assertTrue("renew time is in future",time > Time.now());
TestDelegationTokenIdentifier identifier=new TestDelegationTokenIdentifier();
byte[] tokenId=token.getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
// Password is retrievable while the token is live.
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
LOG.info("Sleep to expire the token");
// Timing-sensitive: the sleeps below rely on the short intervals passed
// to the constructor above — NOTE(review): confirm which constructor arg
// is the renew window before changing either.
Thread.sleep(2000);
try {
dtSecretManager.retrievePassword(identifier);
Assert.fail("Token should have expired");
}
catch ( InvalidToken e) {
// expected: the token expired during the sleep
}
// Renewing an expired (but not max-lifetime-exceeded) token revives it.
dtSecretManager.renewToken(token,"JobTracker");
LOG.info("Sleep beyond the max lifetime");
Thread.sleep(2000);
// Past the max lifetime, renewal must fail permanently.
shouldThrow(new PrivilegedExceptionAction(){
@Override public Object run() throws Exception {
dtSecretManager.renewToken(token,"JobTracker");
return null;
}
}
,InvalidToken.class);
}
finally {
dtSecretManager.stopThreads();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * A token created without a renewer must reject any renewal attempt
 * with an IOException.
 */
@Test public void testDelegationTokenNullRenewer() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(24 * 60 * 60 * 1000, 10 * 1000, 1 * 1000, 3600000);
  dtSecretManager.startThreads();
  try {
    TestDelegationTokenIdentifier dtId =
        new TestDelegationTokenIdentifier(new Text("theuser"), null, null);
    Token token = new Token(dtId, dtSecretManager);
    Assert.assertTrue(token != null);
    try {
      dtSecretManager.renewToken(token, "");
      Assert.fail("Renewal must not succeed");
    } catch (IOException e) {
      // expected: no renewer was set on the token
    }
  } finally {
    // The original leaked the secret manager's background threads;
    // always stop them so later tests are unaffected.
    dtSecretManager.stopThreads();
  }
}
InternalCallVerifier NullVerifier
/** A token identifier with no owner yields no UGI at all. */
@Test
public void testGetUserNullOwner() {
  TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(null, null, null);
  assertNull(id.getUser());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Owner plus a distinct real user produce a proxy UGI: PROXY
 * authentication on top, TOKEN authentication for the real user below.
 */
@Test
public void testGetUserWithOwnerAndReal() {
  TestDelegationTokenIdentifier id =
      new TestDelegationTokenIdentifier(new Text("owner"), null, new Text("realUser"));
  UserGroupInformation proxy = id.getUser();
  UserGroupInformation real = proxy.getRealUser();
  assertNotNull(real);
  assertNull(real.getRealUser());
  assertEquals("owner", proxy.getUserName());
  assertEquals("realUser", real.getUserName());
  assertEquals(AuthenticationMethod.PROXY, proxy.getAuthenticationMethod());
  assertEquals(AuthenticationMethod.TOKEN, real.getAuthenticationMethod());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test @SuppressWarnings("unchecked") public void testDelegationTokenSelector() throws Exception {
TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,10 * 1000,1 * 1000,3600000);
try {
dtSecretManager.startThreads();
AbstractDelegationTokenSelector ds=new AbstractDelegationTokenSelector(KIND);
Token token1=generateDelegationToken(dtSecretManager,"SomeUser1","JobTracker");
token1.setService(new Text("MY-SERVICE1"));
Token token2=generateDelegationToken(dtSecretManager,"SomeUser2","JobTracker");
token2.setService(new Text("MY-SERVICE2"));
List> tokens=new ArrayList>();
tokens.add(token1);
tokens.add(token2);
Token t=ds.selectToken(new Text("MY-SERVICE1"),tokens);
Assert.assertEquals(t,token1);
}
finally {
dtSecretManager.stopThreads();
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** An owner without a real user yields a plain TOKEN-authenticated UGI. */
@Test
public void testGetUserWithOwner() {
  TestDelegationTokenIdentifier id =
      new TestDelegationTokenIdentifier(new Text("owner"), null, null);
  UserGroupInformation ugi = id.getUser();
  assertNull(ugi.getRealUser());
  assertEquals("owner", ugi.getUserName());
  assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Rolling the master key must persist the new key, keep previously
 * issued tokens valid (same derived password), and eventually remove
 * expired stored keys.
 */
@Test(timeout=10000) public void testRollMasterKey() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    Token token = generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker");
    byte[] oldPasswd = token.getPassword();
    int prevNumKeys = dtSecretManager.getAllKeys().length;
    dtSecretManager.rollMasterKey();
    Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
    // At least one new master key must have been generated by the roll.
    int currNumKeys = dtSecretManager.getAllKeys().length;
    Assert.assertTrue((currNumKeys - prevNumKeys) >= 1);
    // Re-derive the password for the already-issued token: it must be
    // unchanged, i.e. old tokens survive a key roll.
    ByteArrayInputStream bi = new ByteArrayInputStream(token.getIdentifier());
    TestDelegationTokenIdentifier identifier = dtSecretManager.createIdentifier();
    identifier.readFields(new DataInputStream(bi));
    byte[] newPasswd = dtSecretManager.retrievePassword(identifier);
    // Fixed: assertEquals on byte[] compares references, so the original
    // assertion could never verify content equality — compare contents.
    Assert.assertArrayEquals(oldPasswd, newPasswd);
    // Wait until the expired master key is purged from storage.
    while (!dtSecretManager.isRemoveStoredMasterKeyCalled) {
      Thread.sleep(200);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Exercises DelegationTokenManager end to end: create, verify, renew
 * (renewal expiry must lie in the future), cancel, then confirm the
 * cancelled token fails verification with an IOException.
 */
@Test
public void testDTManager() throws Exception {
  DelegationTokenManager mgr = new DelegationTokenManager(
      new Text("foo"), DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
  mgr.init();
  Token dt = mgr.createToken(UserGroupInformation.getCurrentUser(), "foo");
  Assert.assertNotNull(dt);
  mgr.verifyToken(dt);
  Assert.assertTrue(mgr.renewToken(dt, "foo") > System.currentTimeMillis());
  mgr.cancelToken(dt, "foo");
  try {
    mgr.verifyToken(dt);
    Assert.fail();
  } catch (IOException expected) {
    // a cancelled token must no longer verify
  } catch (Exception other) {
    Assert.fail();
  }
  mgr.destroy();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Plugs an externally-managed secret manager into the authentication
 * filter via the servlet context attribute and checks that issued
 * delegation tokens carry the external manager's kind ("fooKind").
 */
@Test public void testExternalDelegationTokenSecretManager() throws Exception {
DummyDelegationTokenSecretManager secretMgr=new DummyDelegationTokenSecretManager();
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class),"/*",0);
context.addServlet(new ServletHolder(PingServlet.class),"/bar");
try {
secretMgr.startThreads();
// Hand the external secret manager to the filter before startup.
context.setAttribute(DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,secretMgr);
jetty.start();
URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo");
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
aUrl.getDelegationToken(authURL,token,FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
// The kind proves the token came from the external secret manager.
Assert.assertEquals(new Text("fooKind"),token.getDelegationToken().getKind());
}
finally {
jetty.stop();
secretMgr.stopThreads();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Drives get/renew/cancel delegation-token calls through
 * DelegationTokenAuthenticatedURL: anonymous endpoints yield 401, a
 * token owned by another authenticated user yields 403 on renew, and
 * cancellation is accepted even anonymously.
 */
@Test public void testDelegationTokenAuthenticatorCalls() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class),"/*",0);
context.addServlet(new ServletHolder(PingServlet.class),"/bar");
try {
jetty.start();
URL nonAuthURL=new URL(getJettyURL() + "/foo/bar");
URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2=new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
// Getting a token without authenticating must fail with 401.
try {
aUrl.getDelegationToken(nonAuthURL,token,FOO_USER);
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
// Authenticated request yields a token of the filter's kind.
aUrl.getDelegationToken(authURL,token,FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind());
aUrl.renewDelegationToken(authURL,token);
// Renewal without authentication: 401 again.
try {
aUrl.renewDelegationToken(nonAuthURL,token);
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL,token,FOO_USER);
// Renewal by a different authenticated user ("bar"): 403 forbidden.
try {
aUrl.renewDelegationToken(authURL2,token);
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
// Cancellation succeeds both authenticated and anonymously.
aUrl.getDelegationToken(authURL,token,FOO_USER);
aUrl.cancelDelegationToken(authURL,token);
aUrl.getDelegationToken(authURL,token,FOO_USER);
aUrl.cancelDelegationToken(nonAuthURL,token);
aUrl.getDelegationToken(authURL,token,FOO_USER);
// Anonymous renew once more; only the message is checked if it throws.
try {
aUrl.renewDelegationToken(nonAuthURL,token);
}
catch ( Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
}
finally {
jetty.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the delegation-token REST operations (GETDELEGATIONTOKEN,
 * RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN) over raw
 * HttpURLConnection calls and checks the expected HTTP status codes.
 */
@Test public void testRawHttpCalls() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class),"/*",0);
context.addServlet(new ServletHolder(PingServlet.class),"/bar");
try {
jetty.start();
URL nonAuthURL=new URL(getJettyURL() + "/foo/bar");
URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo");
// Plain GET: 401 when anonymous, 200 when authenticated.
HttpURLConnection conn=(HttpURLConnection)nonAuthURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
conn=(HttpURLConnection)authURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// GETDELEGATIONTOKEN requires authentication.
URL url=new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo");
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// Extract the token's URL-safe string from the JSON response body.
ObjectMapper mapper=new ObjectMapper();
Map map=mapper.readValue(conn.getInputStream(),Map.class);
String dt=(String)((Map)map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// The token itself authenticates requests on either endpoint.
url=new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(authURL.toExternalForm() + "&delegation=" + dt);
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
// RENEW: 401 anonymous, 200 as the renewer ("foo"), 403 as "bar".
url=new URL(nonAuthURL.toExternalForm() + "?op=RENEWDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode());
url=new URL(authURL.toExternalForm() + "&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(getJettyURL() + "/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode());
// CANCEL works anonymously; cancelling again reports 404 (token gone).
url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,conn.getResponseCode());
// Fetch a fresh token, then an authenticated cancel succeeds.
url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo");
conn=(HttpURLConnection)url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
mapper=new ObjectMapper();
map=mapper.readValue(conn.getInputStream(),Map.class);
dt=(String)((Map)map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
url=new URL(authURL.toExternalForm() + "&op=CANCELDELEGATIONTOKEN&token=" + dt);
conn=(HttpURLConnection)url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
}
finally {
jetty.stop();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Runs a MiniKdc plus a Kerberos-protected endpoint: token calls fail
 * without a Kerberos login (GSSException), and inside a kerberized doAs
 * the full get/renew/cancel cycle works, with renewal by a non-renewer
 * rejected with 403.
 */
@Test public void testKerberosDelegationTokenAuthenticator() throws Exception {
org.apache.hadoop.conf.Configuration conf=new org.apache.hadoop.conf.Configuration();
conf.set("hadoop.security.authentication","kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir=new File("target/" + UUID.randomUUID().toString());
Assert.assertTrue(testDir.mkdirs());
MiniKdc kdc=new MiniKdc(MiniKdc.createConf(),testDir);
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(KDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UserServlet.class),"/bar");
try {
kdc.start();
// Create principals for the client and the HTTP endpoint.
File keytabFile=new File(testDir,"test.keytab");
kdc.createPrincipal(keytabFile,"client","HTTP/localhost");
KDTAFilter.keytabFile=keytabFile.getAbsolutePath();
jetty.start();
final DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
final URL url=new URL(getJettyURL() + "/foo/bar");
// Without a Kerberos login the SPNEGO handshake must fail.
try {
aUrl.getDelegationToken(url,token,FOO_USER);
Assert.fail();
}
catch ( AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains("GSSException"));
}
doAsKerberosUser("client",keytabFile.getAbsolutePath(),new Callable(){
@Override public Void call() throws Exception {
// As "client": get and renew a token whose renewer is "client".
aUrl.getDelegationToken(url,token,"client");
Assert.assertNotNull(token.getDelegationToken());
aUrl.renewDelegationToken(url,token);
Assert.assertNotNull(token.getDelegationToken());
// A token with renewer FOO_USER cannot be renewed by "client" (403).
aUrl.getDelegationToken(url,token,FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
try {
aUrl.renewDelegationToken(url,token);
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
// Cancellation clears the token from the wrapper.
aUrl.getDelegationToken(url,token,FOO_USER);
aUrl.cancelDelegationToken(url,token);
Assert.assertNull(token.getDelegationToken());
return null;
}
}
);
}
finally {
jetty.stop();
kdc.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * With the pseudo (simple-auth) fallback filter, an unauthenticated
 * remote user can reach the servlet and obtain a delegation token of
 * the configured kind ("token-kind").
 */
@Test public void testFallbackToPseudoDelegationTokenAuthenticator() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UserServlet.class),"/bar");
try {
jetty.start();
final URL url=new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
// Plain request succeeds and the servlet echoes the remote user.
HttpURLConnection conn=aUrl.openConnection(url,token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
List ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals(FOO_USER,ret.get(0));
// Token retrieval works through the fallback authenticator too.
aUrl.getDelegationToken(url,token,FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind());
return null;
}
}
);
}
finally {
jetty.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks the remote-user / UGI strings the servlet reports: once for
 * the authenticated user itself, and once when proxying as OK_USER
 * (real user stays FOO_USER, effective UGI becomes OK_USER).
 */
@Test public void testHttpUGI() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UGIServlet.class),"/bar");
try {
jetty.start();
final URL url=new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
// No doAs: remote user and UGI are both FOO_USER.
HttpURLConnection conn=aUrl.openConnection(url,token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
List ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals("remoteuser=" + FOO_USER + ":ugi="+ FOO_USER,ret.get(0));
// doAs OK_USER: real user is FOO_USER, effective UGI is OK_USER.
conn=aUrl.openConnection(url,token,OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals("realugi=" + FOO_USER + ":remoteuser="+ OK_USER+ ":ugi="+ OK_USER,ret.get(0));
return null;
}
}
);
}
finally {
jetty.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Proxy-user handling: FOO_USER may impersonate OK_USER but not
 * FAIL_USER; when authenticating with a delegation token the doAs
 * parameter is ignored and the token owner is reported instead.
 */
@Test public void testProxyUser() throws Exception {
final Server jetty=createJettyServer();
Context context=new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0);
context.addServlet(new ServletHolder(UserServlet.class),"/bar");
try {
jetty.start();
final URL url=new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL();
// Allowed proxy target: the servlet sees OK_USER.
HttpURLConnection conn=aUrl.openConnection(url,token,OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
List ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals(OK_USER,ret.get(0));
// Disallowed proxy target: 403.
conn=aUrl.openConnection(url,token,FAIL_USER);
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode());
// Authenticate with a delegation token (note: this inner ugi
// shadows the outer one); doAs is then ignored and the token
// owner FOO_USER is reported.
aUrl.getDelegationToken(url,token,FOO_USER);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
ugi.addToken(token.getDelegationToken());
token=new DelegationTokenAuthenticatedURL.Token();
conn=aUrl.openConnection(url,token,OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
ret=IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1,ret.size());
Assert.assertEquals(FOO_USER,ret.get(0));
return null;
}
}
);
}
finally {
jetty.stop();
}
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks that init()/start() are delivered to child services in
 * registration order and stop() in reverse order, and that a second
 * stop() is a no-op that leaves the sequence numbers untouched.
 */
@Test
public void testCallSequence() {
  ServiceManager manager = new ServiceManager("ServiceManager");
  for (int n = 0; n < NUM_OF_SERVICES; n++) {
    manager.addTestService(new CompositeServiceImpl(n));
  }
  CompositeServiceImpl[] children =
      manager.getServices().toArray(new CompositeServiceImpl[0]);
  assertEquals("Number of registered services ", NUM_OF_SERVICES, children.length);

  manager.init(new Configuration());
  assertInState(STATE.INITED, children);
  for (int n = 0; n < NUM_OF_SERVICES; n++) {
    assertEquals("For " + children[n]
        + " service, init() call sequence number should have been ",
        n, children[n].getCallSequenceNumber());
  }

  resetServices(children);
  manager.start();
  assertInState(STATE.STARTED, children);
  for (int n = 0; n < NUM_OF_SERVICES; n++) {
    assertEquals("For " + children[n]
        + " service, start() call sequence number should have been ",
        n, children[n].getCallSequenceNumber());
  }

  resetServices(children);
  manager.stop();
  assertInState(STATE.STOPPED, children);
  for (int n = 0; n < NUM_OF_SERVICES; n++) {
    assertEquals("For " + children[n]
        + " service, stop() call sequence number should have been ",
        ((NUM_OF_SERVICES - 1) - n), children[n].getCallSequenceNumber());
  }

  // A repeated stop() must not re-deliver stop to the children.
  manager.stop();
  for (int n = 0; n < NUM_OF_SERVICES; n++) {
    assertEquals("For " + children[n]
        + " service, stop() call sequence number should have been ",
        ((NUM_OF_SERVICES - 1) - n), children[n].getCallSequenceNumber());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * An already-inited sibling added during parent init: the parent ends
 * up with both children and runs the full lifecycle cleanly.
 */
@Test(timeout=1000)
public void testAddInitedSiblingInInit() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService peer = new BreakableService();
  peer.init(new Configuration());
  root.addService(new AddSiblingService(root, peer, STATE.INITED));
  root.init(new Configuration());
  root.start();
  root.stop();
  assertEquals("Incorrect number of services", 2, root.getServices().size());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * An uninited sibling injected during parent init: parent.start() must
 * fail with ServiceStateException, yet both children stay registered.
 */
@Test(timeout=1000)
public void testAddUninitedSiblingInInit() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService peer = new BreakableService();
  root.addService(new AddSiblingService(root, peer, STATE.INITED));
  root.init(new Configuration());
  try {
    root.start();
    fail("Expected an exception, got " + root);
  } catch (ServiceStateException expected) {
    // the uninited sibling cannot be started
  }
  root.stop();
  assertEquals("Incorrect number of services", 2, root.getServices().size());
}
UtilityVerifier InternalCallVerifier
/**
 * A child that has already run its full lifecycle cannot be re-inited
 * by a parent: parent.init() must throw ServiceStateException.
 */
@Test(timeout=1000)
public void testAddStoppedChildBeforeInit() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService used = new BreakableService();
  used.init(new Configuration());
  used.start();
  used.stop();
  AddSiblingService.addChildToService(root, used);
  try {
    root.init(new Configuration());
    fail("Expected an exception, got " + root);
  } catch (ServiceStateException expected) {
    // a stopped child cannot be re-inited
  }
  root.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * A sibling that is already started may be added while the parent is
 * starting; the composite then completes its lifecycle with both
 * children registered.
 */
@Test(timeout=1000)
public void testAddStartedSiblingInStart() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService peer = new BreakableService();
  peer.init(new Configuration());
  peer.start();
  root.addService(new AddSiblingService(root, peer, STATE.STARTED));
  root.init(new Configuration());
  root.start();
  root.stop();
  assertEquals("Incorrect number of services", 2, root.getServices().size());
}
IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * One child service is rigged to throw on start(); starting the manager
 * must propagate the failure, and the services that were started before
 * the failure must have been stopped again.
 */
@Test public void testServiceStartup(){
ServiceManager serviceManager=new ServiceManager("ServiceManager");
for (int i=0; i < NUM_OF_SERVICES; i++) {
CompositeServiceImpl service=new CompositeServiceImpl(i);
if (i == FAILED_SERVICE_SEQ_NUMBER) {
// This child will throw from start().
service.setThrowExceptionOnStart(true);
}
serviceManager.addTestService(service);
}
CompositeServiceImpl[] services=serviceManager.getServices().toArray(new CompositeServiceImpl[0]);
Configuration conf=new Configuration();
serviceManager.init(conf);
try {
serviceManager.start();
fail("Exception should have been thrown due to startup failure of last service");
}
catch ( ServiceTestRuntimeException e) {
for (int i=0; i < NUM_OF_SERVICES - 1; i++) {
if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
// NOTE(review): this branch checks the LAST service's state rather
// than services[i] — presumably the never-started tail service
// stays INITED; confirm the fixed index is intentional.
assertEquals("Service state should have been ",STATE.INITED,services[NUM_OF_SERVICES - 1].getServiceState());
}
else {
// Services started before the failure must have been stopped.
assertEquals("Service state should have been ",STATE.STOPPED,services[i].getServiceState());
}
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * removeService(): after adding three child services during init and
 * removing one, exactly two remain registered.
 */
@Test public void testRemoveService(){
  CompositeService testService = new CompositeService("TestService") {
    @Override
    public void serviceInit(Configuration conf) {
      // A non-Service object must be rejected by addIfService().
      Integer notAService = Integer.valueOf(0); // deprecated new Integer(int) replaced
      assertFalse("Added an integer as a service", addIfService(notAService));
      Service service1 = new AbstractService("Service1") {
      };
      addIfService(service1);
      Service service2 = new AbstractService("Service2") {
      };
      addIfService(service2);
      Service service3 = new AbstractService("Service3") {
      };
      addIfService(service3);
      // Drop the first child again; two should remain.
      removeService(service1);
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 2, testService.getServices().size());
}
UtilityVerifier InternalCallVerifier
/**
 * A child that is already started cannot be inited again by a parent:
 * parent.init() must throw ServiceStateException.
 */
@Test(timeout=1000)
public void testAddStartedChildBeforeInit() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService running = new BreakableService();
  running.init(new Configuration());
  running.start();
  AddSiblingService.addChildToService(root, running);
  try {
    root.init(new Configuration());
    fail("Expected an exception, got " + root);
  } catch (ServiceStateException expected) {
    // a started child cannot be re-inited
  }
  root.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * A started sibling added while the parent is stopping still leaves the
 * parent with both children registered after the lifecycle completes.
 */
@Test(timeout=1000)
public void testAddStartedSiblingInStop() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService peer = new BreakableService();
  peer.init(new Configuration());
  peer.start();
  root.addService(new AddSiblingService(root, peer, STATE.STOPPED));
  root.init(new Configuration());
  root.start();
  root.stop();
  assertEquals("Incorrect number of services", 2, root.getServices().size());
}
InternalCallVerifier EqualityVerifier
/**
 * A started sibling added during parent init keeps running through the
 * parent's init/start and is only stopped when the parent stops.
 */
@Test(timeout=1000)
public void testAddStartedSiblingInInit() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService peer = new BreakableService();
  peer.init(new Configuration());
  peer.start();
  root.addService(new AddSiblingService(root, peer, STATE.INITED));
  root.init(new Configuration());
  assertInState(STATE.STARTED, peer);
  root.start();
  assertInState(STATE.STARTED, peer);
  root.stop();
  assertEquals("Incorrect number of services", 2, root.getServices().size());
  assertInState(STATE.STOPPED, peer);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * addIfService(): rejects a non-Service object and accepts a real
 * Service, which is then registered exactly once.
 */
@Test(timeout=1000) public void testAddIfService(){
  CompositeService testService = new CompositeService("TestService") {
    Service service;
    @Override
    public void serviceInit(Configuration conf) {
      // A plain Integer is not a Service and must be rejected.
      Integer notAService = Integer.valueOf(0); // deprecated new Integer(int) replaced
      assertFalse("Added an integer as a service", addIfService(notAService));
      service = new AbstractService("Service") {
      };
      assertTrue("Unable to add a service", addIfService(service));
    }
  };
  testService.init(new Configuration());
  assertEquals("Incorrect number of services", 1, testService.getServices().size());
}
InternalCallVerifier EqualityVerifier
/**
 * An uninited sibling scheduled for addition during start stays
 * NOTINITED through parent.init(); the parent still completes its
 * lifecycle with both children registered.
 */
@Test(timeout=1000)
public void testAddUninitedSiblingInStart() throws Throwable {
  CompositeService root = new CompositeService("parent");
  BreakableService peer = new BreakableService();
  root.addService(new AddSiblingService(root, peer, STATE.STARTED));
  root.init(new Configuration());
  assertInState(STATE.NOTINITED, peer);
  root.start();
  root.stop();
  assertEquals("Incorrect number of services", 2, root.getServices().size());
}
InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Test that the {@link BreakableStateChangeListener} is picking up
 * the state changes and that its last event field is as expected.
 */
@Test
public void testEventHistory() {
  register();
  BreakableService svc = new BreakableService();
  assertListenerState(listener, Service.STATE.NOTINITED);
  assertEquals(0, listener.getEventCount());

  svc.init(new Configuration());
  assertListenerState(listener, Service.STATE.INITED);
  assertSame(svc, listener.getLastService());
  assertListenerEventCount(listener, 1);

  svc.start();
  assertListenerState(listener, Service.STATE.STARTED);
  assertListenerEventCount(listener, 2);

  svc.stop();
  assertListenerState(listener, Service.STATE.STOPPED);
  assertListenerEventCount(listener, 3);
}
UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A service that breaks in both init and stop: the init failure must
 * still trigger stop, and the failure state/cause must record the
 * INITED transition that blew up.
 */
@Test
public void testStopFailingInitAndStop() throws Throwable {
  BreakableService svc = new BreakableService(true, false, true);
  svc.registerServiceListener(new LoggingStateChangeListener());
  try {
    svc.init(new Configuration());
    fail("Expected a failure, got " + svc);
  } catch (BreakableService.BrokenLifecycleEvent e) {
    assertEquals(Service.STATE.INITED, e.state);
  }
  assertServiceStateStopped(svc);
  assertEquals(Service.STATE.INITED, svc.getFailureState());
  Throwable cause = svc.getFailureCause();
  assertNotNull("Null failure cause in " + svc, cause);
  BreakableService.BrokenLifecycleEvent event =
      (BreakableService.BrokenLifecycleEvent) cause;
  assertNotNull("null state in " + event + " raised by " + svc, event.state);
  assertEquals(Service.STATE.INITED, event.state);
}
UtilityVerifier InternalCallVerifier
/**
 * A failure inside init still leaves the service stopped (the STOPPED
 * transition happens exactly once), and a later explicit stop() is a
 * no-op.
 * @throws Throwable if necessary
 */
@Test
public void testStopFailedInit() throws Throwable {
  BreakableService broken = new BreakableService(true, false, false);
  assertServiceStateCreated(broken);
  try {
    broken.init(new Configuration());
    fail("Expected a failure, got " + broken);
  } catch (BreakableService.BrokenLifecycleEvent expected) {
    // init was configured to fail
  }
  assertServiceStateStopped(broken);
  assertStateCount(broken, Service.STATE.INITED, 1);
  assertStateCount(broken, Service.STATE.STOPPED, 1);
  broken.stop();
  assertStateCount(broken, Service.STATE.STOPPED, 1);
}
InternalCallVerifier EqualityVerifier
/**
 * A listener that throws on the STARTED notification must not disrupt
 * the service lifecycle; later events are still delivered and the
 * failure is counted exactly once.
 */
@Test
public void testServiceFailingNotifications() throws Throwable {
  BreakableService svc = new BreakableService(false, false, false);
  BreakableStateChangeListener observer = new BreakableStateChangeListener();
  observer.setFailingState(Service.STATE.STARTED);
  svc.registerServiceListener(observer);
  svc.init(new Configuration());
  assertEventCount(observer, 1);
  svc.start();
  assertEventCount(observer, 2);
  assertEquals(1, observer.getFailureCount());
  svc.stop();
  assertEventCount(observer, 3);
  assertEquals(1, observer.getFailureCount());
  // A second stop is a no-op and raises no further events.
  svc.stop();
}
UtilityVerifier InternalCallVerifier
/**
 * verify that when a service fails during its stop operation,
 * its state does not change.
 * @throws Throwable if necessary
 */
@Test
public void testFailingStop() throws Throwable {
  BreakableService failsOnStop = new BreakableService(false, false, true);
  failsOnStop.init(new Configuration());
  failsOnStop.start();
  try {
    failsOnStop.stop();
    fail("Expected a failure, got " + failsOnStop);
  } catch (BreakableService.BrokenLifecycleEvent expected) {
    // stop was configured to fail
  }
  // Despite the exception, the STOPPED transition happened exactly once.
  assertStateCount(failsOnStop, Service.STATE.STOPPED, 1);
}
UtilityVerifier InternalCallVerifier
/**
 * A failure during start() must drive the service into the STOPPED
 * state rather than leaving it half-started.
 * @throws Throwable if necessary
 */
@Test
public void testStopFailedStart() throws Throwable {
  BreakableService failsOnStart = new BreakableService(false, true, false);
  failsOnStart.init(new Configuration());
  assertServiceStateInited(failsOnStart);
  try {
    failsOnStart.start();
    fail("Expected a failure, got " + failsOnStart);
  } catch (BreakableService.BrokenLifecycleEvent expected) {
    // start was configured to fail
  }
  assertServiceStateStopped(failsOnStart);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * This test verifies that you can block waiting for something to happen
 * and use notifications to manage it
 * @throws Throwable on a failure
 */
@Test public void testListenerWithNotifications() throws Throwable {
// Service terminates itself asynchronously after 2000 ms.
AsyncSelfTerminatingService service=new AsyncSelfTerminatingService(2000);
NotifyingListener listener=new NotifyingListener();
service.registerServiceListener(listener);
service.init(new Configuration());
service.start();
assertServiceInState(service,Service.STATE.STARTED);
long start=System.currentTimeMillis();
// Block (up to 20 s) until the listener is notified of a state change.
synchronized (listener) {
listener.wait(20000);
}
long duration=System.currentTimeMillis() - start;
// The notification must be the STOPPED transition of the service.
assertEquals(Service.STATE.STOPPED,listener.notifyingState);
assertServiceInState(service,Service.STATE.STOPPED);
// The wait should end well before the 20 s timeout.
assertTrue("Duration of " + duration + " too long",duration < 10000);
}
IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AutoInputFormat must detect the format of each split: text files yield
 * (LongWritable offset, Text line) pairs, sequence files yield their stored
 * (IntWritable, LongWritable) records. Both file kinds are written into the
 * same input directory and distinguished by the record classes read back.
 */
@SuppressWarnings({"unchecked","deprecation"}) @Test public void testFormat() throws IOException {
JobConf job=new JobConf(conf);
FileSystem fs=FileSystem.getLocal(conf);
Path dir=new Path(System.getProperty("test.build.data",".") + "/mapred");
Path txtFile=new Path(dir,"auto.txt");
Path seqFile=new Path(dir,"auto.seq");
fs.delete(dir,true);
FileInputFormat.setInputPaths(job,dir);
// Text file: one multiple-of-10 value per line.
Writer txtWriter=new OutputStreamWriter(fs.create(txtFile));
try {
for (int i=0; i < LINES_COUNT; i++) {
txtWriter.write("" + (10 * i));
txtWriter.write("\n");
}
}
finally {
txtWriter.close();
}
// Sequence file: keys are multiples of 11, values multiples of 12.
SequenceFile.Writer seqWriter=SequenceFile.createWriter(fs,conf,seqFile,IntWritable.class,LongWritable.class);
try {
for (int i=0; i < RECORDS_COUNT; i++) {
IntWritable key=new IntWritable(11 * i);
LongWritable value=new LongWritable(12 * i);
seqWriter.append(key,value);
}
}
finally {
seqWriter.close();
}
AutoInputFormat format=new AutoInputFormat();
InputSplit[] splits=format.getSplits(job,SPLITS_COUNT);
for ( InputSplit split : splits) {
RecordReader reader=format.getRecordReader(split,job,Reporter.NULL);
Object key=reader.createKey();
Object value=reader.createValue();
try {
while (reader.next(key,value)) {
if (key instanceof LongWritable) {
// LongWritable key => split came from the text file.
assertEquals("Wrong value class.",Text.class,value.getClass());
assertTrue("Invalid value",Integer.parseInt(((Text)value).toString()) % 10 == 0);
}
else {
// Otherwise the split came from the sequence file.
assertEquals("Wrong key class.",IntWritable.class,key.getClass());
assertEquals("Wrong value class.",LongWritable.class,value.getClass());
assertTrue("Invalid key.",((IntWritable)key).get() % 11 == 0);
assertTrue("Invalid value.",((LongWritable)value).get() % 12 == 0);
}
}
}
finally {
reader.close();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * StreamUtil.goodClassOrNull must resolve a class that lives in the
 * default (no) package when it is loadable from the classloader set
 * on the configuration.
 * @throws Exception if the jar cannot be located or the class loaded
 */
@Test public void testGoodClassOrNull() throws Exception {
  // Locals renamed from UPPER_SNAKE (constant convention) to camelCase.
  String name = "ClassWithNoPackage";
  ClassLoader cl = TestClassWithNoPackage.class.getClassLoader();
  String jar = JarFinder.getJar(cl.loadClass(name));
  Configuration conf = new Configuration();
  // Null parent: only the jar itself is searched for the class.
  conf.setClassLoader(new URLClassLoader(new URL[]{new URL("file", null, jar)}, null));
  String defaultPackage = this.getClass().getPackage().getName();
  // Class<?> instead of the raw type Class.
  Class<?> c = StreamUtil.goodClassOrNull(conf, name, defaultPackage);
  assertNotNull("Class " + name + " not found!", c);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * DumpTypedBytes must dump a 100-line HDFS text file to stdout as
 * typed-bytes (Long key, String value) pairs. stdout is redirected to a
 * buffer, the tool is run, and the buffer is decoded and verified.
 */
@Test public void testDumping() throws Exception {
Configuration conf=new Configuration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs=cluster.getFileSystem();
// Capture System.out so the tool's output can be inspected.
PrintStream psBackup=System.out;
ByteArrayOutputStream out=new ByteArrayOutputStream();
PrintStream psOut=new PrintStream(out);
System.setOut(psOut);
DumpTypedBytes dumptb=new DumpTypedBytes(conf);
try {
Path root=new Path("/typedbytestest");
assertTrue(fs.mkdirs(root));
assertTrue(fs.exists(root));
// Write 100 lines, each a multiple of 10.
OutputStreamWriter writer=new OutputStreamWriter(fs.create(new Path(root,"test.txt")));
try {
for (int i=0; i < 100; i++) {
writer.write("" + (10 * i) + "\n");
}
}
finally {
writer.close();
}
String[] args=new String[1];
args[0]="/typedbytestest";
int ret=dumptb.run(args);
assertEquals("Return value != 0.",0,ret);
// Decode the captured stdout as typed-bytes records.
ByteArrayInputStream in=new ByteArrayInputStream(out.toByteArray());
TypedBytesInput tbinput=new TypedBytesInput(new DataInputStream(in));
int counter=0;
Object key=tbinput.read();
while (key != null) {
// Keys are byte offsets (Long); values are the text lines (String).
assertEquals(Long.class,key.getClass());
Object value=tbinput.read();
assertEquals(String.class,value.getClass());
assertTrue("Invalid output.",Integer.parseInt(value.toString()) % 10 == 0);
counter++;
key=tbinput.read();
}
assertEquals("Wrong number of outputs.",100,counter);
}
finally {
try {
fs.close();
}
catch ( Exception e) {
// best-effort close; shutdown below still runs
}
// Always restore the real stdout before shutting down.
System.setOut(psBackup);
cluster.shutdown();
}
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Round-trip test for LoadTypedBytes: 100 (Long, String) typed-bytes
 * pairs are fed through System.in, loaded into an HDFS sequence file,
 * then the file is read back and the classes, value format and record
 * count are verified.
 */
@Test public void testLoading() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  // Serialize 100 (i, "10*i") pairs into an in-memory buffer.
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(out));
  for (int i = 0; i < 100; i++) {
    tboutput.write(Long.valueOf(i)); // valueOf, not the deprecated new Long(i)
    tboutput.write("" + (10 * i));
  }
  // Feed the buffer to the tool through System.in, remembering the original.
  InputStream isBackup = System.in;
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  System.setIn(in);
  LoadTypedBytes loadtb = new LoadTypedBytes(conf);
  try {
    Path root = new Path("/typedbytestest");
    assertTrue(fs.mkdirs(root));
    assertTrue(fs.exists(root));
    String[] args = new String[1];
    args[0] = "/typedbytestest/test.seq";
    int ret = loadtb.run(args);
    assertEquals("Return value != 0.", 0, ret);
    Path file = new Path(root, "test.seq");
    assertTrue(fs.exists(file));
    // Read the sequence file back and validate every record.
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
    int counter = 0;
    TypedBytesWritable key = new TypedBytesWritable();
    TypedBytesWritable value = new TypedBytesWritable();
    while (reader.next(key, value)) {
      assertEquals(Long.class, key.getValue().getClass());
      assertEquals(String.class, value.getValue().getClass());
      assertTrue("Invalid record.", Integer.parseInt(value.toString()) % 10 == 0);
      counter++;
    }
    assertEquals("Wrong number of records.", 100, counter);
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
      // best-effort close; shutdown below still runs
    }
    System.setIn(isBackup); // always restore stdin
    cluster.shutdown();
  }
}
InternalCallVerifier BooleanVerifier
/**
 * StreamUtil.isLocalJobTracker: the configured framework name decides
 * locality; only the "local" framework counts, regardless of the job
 * tracker IPC address.
 */
@Test public void testFramework(){
  JobConf jc = new JobConf();

  // Local address but YARN framework: not local.
  jc.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  jc.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  assertFalse("Expected 'isLocal' to be false", StreamUtil.isLocalJobTracker(jc));

  // Local address but classic framework: still not local.
  jc.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  jc.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertFalse("Expected 'isLocal' to be false", StreamUtil.isLocalJobTracker(jc));

  // Remote address but local framework: local wins.
  jc.set(JTConfig.JT_IPC_ADDRESS, "jthost:9090");
  jc.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  assertTrue("Expected 'isLocal' to be true", StreamUtil.isLocalJobTracker(jc));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Streaming job with two -cacheFile arguments: both cache files must be
 * localized under their symlink names and their contents appear in the
 * job output. Runs against a mini DFS + mini MR cluster.
 */
@Test public void testMultipleCachefiles() throws Exception {
boolean mayExit=false;
MiniMRCluster mr=null;
MiniDFSCluster dfs=null;
try {
Configuration conf=new Configuration();
dfs=new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys=dfs.getFileSystem();
String namenode=fileSys.getUri().toString();
mr=new MiniMRCluster(1,namenode,3);
// Forward every property of the mini cluster's job conf via -jobconf.
List args=new ArrayList();
for ( Map.Entry entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
String argv[]=new String[]{"-input",INPUT_FILE,"-output",OUTPUT_DIR,"-mapper",map,"-reducer",reduce,"-jobconf","stream.tmpdir=" + System.getProperty("test.build.data","/tmp"),"-jobconf",JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-jobconf",JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-cacheFile",fileSys.getUri() + CACHE_FILE + "#"+ mapString,"-cacheFile",fileSys.getUri() + CACHE_FILE_2 + "#"+ mapString2,"-jobconf","mapred.jar=" + TestStreaming.STREAMING_JAR};
for ( String arg : argv) {
args.add(arg);
}
argv=args.toArray(new String[args.size()]);
fileSys.delete(new Path(OUTPUT_DIR),true);
// Input: one line naming each cache-file symlink.
DataOutputStream file=fileSys.create(new Path(INPUT_FILE));
file.writeBytes(mapString + "\n");
file.writeBytes(mapString2 + "\n");
file.close();
// The two cache files whose contents the job is expected to emit.
file=fileSys.create(new Path(CACHE_FILE));
file.writeBytes(cacheString + "\n");
file.close();
file=fileSys.create(new Path(CACHE_FILE_2));
file.writeBytes(cacheString2 + "\n");
file.close();
job=new StreamJob(argv,mayExit);
job.go();
fileSys=dfs.getFileSystem();
String line=null;
String line2=null;
// Read the first two lines of the (last) output part file.
Path[] fileList=FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR),new Utils.OutputFileUtils.OutputFilesFilter()));
for (int i=0; i < fileList.length; i++) {
System.out.println(fileList[i].toString());
BufferedReader bread=new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
line=bread.readLine();
System.out.println(line);
line2=bread.readLine();
System.out.println(line2);
}
// Output lines are the cache-file contents followed by a tab.
assertEquals(cacheString + "\t",line);
assertEquals(cacheString2 + "\t",line2);
}
finally {
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.shutdown();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * StreamJob exit codes for the trivial argument cases: no arguments is
 * a usage error (1); -help and -info both succeed (0).
 */
@Test public void testOptions() throws Exception {
  StreamJob sj = new StreamJob();
  String[] noArgs = {};
  assertEquals(1, sj.run(noArgs));
  assertEquals(0, sj.run(new String[]{"-help"}));
  assertEquals(0, sj.run(new String[]{"-info"}));
}
InternalCallVerifier BooleanVerifier
/**
 * Runs the parent class's command-line streaming job, then additionally
 * verifies that the combiner actually ran by checking its input/output
 * record counters are non-zero.
 */
@Test public void testCommandLine() throws Exception {
super.testCommandLine();
String counterGrp="org.apache.hadoop.mapred.Task$Counter";
// 'job' is populated by super.testCommandLine().
Counters counters=job.running_.getCounters();
assertTrue(counters.findCounter(counterGrp,"COMBINE_INPUT_RECORDS").getValue() != 0);
assertTrue(counters.findCounter(counterGrp,"COMBINE_OUTPUT_RECORDS").getValue() != 0);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Streaming job with a single -cacheFile localized under the symlink
 * "testlink": the cache file's content must appear in the job output.
 * Runs against a mini DFS + mini MR cluster.
 */
@Test(timeout=120000) public void testSymLink() throws Exception {
boolean mayExit=false;
MiniMRCluster mr=null;
MiniDFSCluster dfs=null;
try {
Configuration conf=new Configuration();
dfs=new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys=dfs.getFileSystem();
String namenode=fileSys.getUri().toString();
mr=new MiniMRCluster(1,namenode,3);
// Forward every property of the mini cluster's job conf via -jobconf.
List args=new ArrayList();
for ( Map.Entry entry : mr.createJobConf()) {
args.add("-jobconf");
args.add(entry.getKey() + "=" + entry.getValue());
}
String argv[]=new String[]{"-input",INPUT_FILE,"-output",OUTPUT_DIR,"-mapper",map,"-reducer",reduce,"-jobconf","stream.tmpdir=" + System.getProperty("test.build.data","/tmp"),"-jobconf",JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-jobconf",JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-cacheFile",fileSys.getUri() + CACHE_FILE + "#testlink","-jobconf","mapred.jar=" + TestStreaming.STREAMING_JAR};
for ( String arg : argv) {
args.add(arg);
}
argv=args.toArray(new String[args.size()]);
fileSys.delete(new Path(OUTPUT_DIR),true);
DataOutputStream file=fileSys.create(new Path(INPUT_FILE));
file.writeBytes(mapString);
file.close();
// The cache file whose content the job is expected to emit.
file=fileSys.create(new Path(CACHE_FILE));
file.writeBytes(cacheString);
file.close();
job=new StreamJob(argv,mayExit);
job.go();
fileSys=dfs.getFileSystem();
String line=null;
// Read the first line of the (last) output part file.
Path[] fileList=FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR),new Utils.OutputFileUtils.OutputFilesFilter()));
for (int i=0; i < fileList.length; i++) {
System.out.println(fileList[i].toString());
BufferedReader bread=new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
line=bread.readLine();
System.out.println(line);
}
// Output is the cache-file content followed by a tab.
assertEquals(cacheString + "\t",line);
}
finally {
if (dfs != null) {
dfs.shutdown();
}
if (mr != null) {
mr.shutdown();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A mapper that does not consume all of its input must not truncate the
 * job output: the output must contain the expected number of records.
 */
@Test public void testUnconsumedInput() throws Exception {
String outFileName="part-00000";
File outFile=null;
try {
try {
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
catch ( Exception e) {
// best-effort cleanup of a previous run
}
createInput();
Configuration conf=new Configuration();
// Disable the written-records threshold so skipping is always enabled.
conf.set("stream.minRecWrittenToEnableSkip_","0");
job=new StreamJob();
job.setConf(conf);
int exitCode=job.run(genArgs());
assertEquals("Job failed",0,exitCode);
outFile=new File(OUTPUT_DIR,outFileName).getAbsoluteFile();
String output=StreamUtil.slurp(outFile);
// Each record contributes one tab; count them to detect truncation.
assertEquals("Output was truncated",EXPECTED_OUTPUT_SIZE,StringUtils.countMatches(output,"\t"));
}
finally {
INPUT_FILE.delete();
FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
}
}
InternalCallVerifier EqualityVerifier
/**
 * KeyOnlyTextOutputReader must treat each whole input line as the key,
 * including lines without a separator, and report end-of-input.
 * @throws IOException on a reader failure
 */
@Test public void testKeyOnlyTextOutputReader() throws IOException {
  String text = "key,value\nkey2,value2\nnocomma\n";
  PipeMapRed pipeMapRed = new MyPipeMapRed(text);
  KeyOnlyTextOutputReader outputReader = new KeyOnlyTextOutputReader();
  outputReader.initialize(pipeMapRed);
  outputReader.readKeyValue();
  Assert.assertEquals(new Text("key,value"), outputReader.getCurrentKey());
  outputReader.readKeyValue();
  Assert.assertEquals(new Text("key2,value2"), outputReader.getCurrentKey());
  outputReader.readKeyValue();
  Assert.assertEquals(new Text("nocomma"), outputReader.getCurrentKey());
  // End of input: assertFalse instead of assertEquals(false, ...).
  Assert.assertFalse(outputReader.readKeyValue());
}
InternalCallVerifier EqualityVerifier
/**
 * End-to-end run of StreamInputFormat with StreamXmlRecordReader as the
 * configured record reader: the identity mapper job over target/input.xml
 * must complete successfully and produce the expected output.
 */
@Test public void testStreamXmlRecordReader() throws Exception {
  Job job = new Job();
  Configuration conf = job.getConfiguration();
  job.setJarByClass(TestStreamXmlRecordReader.class);
  job.setMapperClass(Mapper.class);
  // Configure the streaming XML record reader and its record delimiters.
  conf.set("stream.recordreader.class", "org.apache.hadoop.streaming.mapreduce.StreamXmlRecordReader");
  conf.set("stream.recordreader.begin", "");
  conf.set("stream.recordreader.end", " ");
  job.setInputFormatClass(StreamInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(Text.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  FileInputFormat.addInputPath(job, new Path("target/input.xml"));
  OUTPUT_DIR = new Path("target/output");
  fs = FileSystem.get(conf);
  // Remove output from any previous run.
  if (fs.exists(OUTPUT_DIR)) {
    fs.delete(OUTPUT_DIR, true);
  }
  FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
  boolean ret = job.waitForCompletion(true);
  // assertTrue instead of assertEquals(true, ...).
  assertTrue(ret);
  checkOutput();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Smoke-test HDFS through the FileSystem API: write a single byte to a
 * file and read it back, checking the byte value and end-of-stream.
 * @throws Exception on any filesystem failure
 */
@Test @TestHdfs public void testHadoopFileSystem() throws Exception {
  Configuration conf = TestHdfsHelper.getHdfsConf();
  FileSystem fs = FileSystem.get(conf);
  try {
    OutputStream os = fs.create(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
    os.write(new byte[]{1});
    os.close();
    InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
    // JUnit convention: expected value first, actual second.
    assertEquals(1, is.read());
    assertEquals(-1, is.read()); // EOF after the single byte
    is.close();
  } finally {
    fs.close();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Running the tool with two existing users must print both users'
 * group listings, concatenated in argument order.
 */
@Test public void testMultipleExistingUsers() throws Exception {
  String[] users = {testUser1.getUserName(), testUser2.getUserName()};
  String expected = getExpectedOutput(testUser1) + getExpectedOutput(testUser2);
  String actualOutput = runTool(conf, users, true);
  assertEquals("Show the output for both users given", expected, actualOutput);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Non-existent users interleaved with existing ones: every name still
 * produces an output entry (non-existent users appear with no groups),
 * in argument order.
 */
@Test public void testExistingInterleavedWithNonExistentUsers() throws Exception {
  String[] users = {"does-not-exist1", testUser1.getUserName(), "does-not-exist2", testUser2.getUserName()};
  String expected = getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1"))
      + getExpectedOutput(testUser1)
      + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2"))
      + getExpectedOutput(testUser2);
  String actualOutput = runTool(conf, users, true);
  assertEquals("Show the output for only the user given, with no groups", expected, actualOutput);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A single existing user must produce exactly that user's group listing.
 */
@Test public void testExistingUser() throws Exception {
  String expected = getExpectedOutput(testUser1);
  String actualOutput = runTool(conf, new String[]{testUser1.getUserName()}, true);
  assertEquals("Show only the output of the user given", expected, actualOutput);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * SimpleCopyListing.buildListing: two source trees containing a
 * duplicate relative path ("2") must raise DuplicateFileException while
 * still accounting for all bytes (3+3+4=10) and paths (3); after the
 * sources are deleted, buildListing must raise InvalidInputException.
 */
@Test(timeout=10000) public void testBuildListing(){
  FileSystem fs = null;
  try {
    fs = FileSystem.get(getConf());
    List srcPaths = new ArrayList();
    Path p1 = new Path("/tmp/in/1");
    Path p2 = new Path("/tmp/in/2");
    Path p3 = new Path("/tmp/in2/2");
    Path target = new Path("/tmp/out/1");
    // Both parents are sources, so "2" appears twice as a relative path.
    srcPaths.add(p1.getParent());
    srcPaths.add(p3.getParent());
    TestDistCpUtils.createFile(fs, "/tmp/in/1");
    TestDistCpUtils.createFile(fs, "/tmp/in/2");
    TestDistCpUtils.createFile(fs, "/tmp/in2/2");
    fs.mkdirs(target);
    // Give the three files 3, 3 and 4 bytes respectively (10 total).
    OutputStream out = fs.create(p1);
    out.write("ABC".getBytes());
    out.close();
    out = fs.create(p2);
    out.write("DEF".getBytes());
    out.close();
    out = fs.create(p3);
    out.write("GHIJ".getBytes());
    out.close();
    Path listingFile = new Path("/tmp/file");
    DistCpOptions options = new DistCpOptions(srcPaths, target);
    options.setSyncFolder(true);
    CopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
    try {
      listing.buildListing(listingFile, options);
      Assert.fail("Duplicates not detected");
    } catch (DuplicateFileException ignore) {
      // expected: "/2" exists under both source roots
    }
    // Expected-first argument order.
    Assert.assertEquals(10, listing.getBytesToCopy());
    Assert.assertEquals(3, listing.getNumberOfPaths());
    TestDistCpUtils.delete(fs, "/tmp");
    try {
      listing.buildListing(listingFile, options);
      Assert.fail("Invalid input not detected");
    } catch (InvalidInputException ignore) {
      // expected: sources no longer exist
    }
    TestDistCpUtils.delete(fs, "/tmp");
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Test build listing failed");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp");
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A SimpleCopyListing whose shouldCopy() filters out _SUCCESS files must
 * omit them from the listing: only the directory "/1", the file
 * "/1/file" and the file "/2" appear, in that order.
 */
@Test(timeout=10000) public void testSkipCopy() throws Exception {
  // Anonymous subclass: skip FileOutputCommitter success markers.
  SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS) {
    @Override protected boolean shouldCopy(Path path, DistCpOptions options) {
      return !path.getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME);
    }
  };
  FileSystem fs = FileSystem.get(getConf());
  List srcPaths = new ArrayList();
  srcPaths.add(new Path("/tmp/in4/1"));
  srcPaths.add(new Path("/tmp/in4/2"));
  Path target = new Path("/tmp/out4/1");
  TestDistCpUtils.createFile(fs, "/tmp/in4/1/_SUCCESS"); // must be skipped
  TestDistCpUtils.createFile(fs, "/tmp/in4/1/file");
  TestDistCpUtils.createFile(fs, "/tmp/in4/2");
  fs.mkdirs(target);
  DistCpOptions options = new DistCpOptions(srcPaths, target);
  Path listingFile = new Path("/tmp/list4");
  listing.buildListing(listingFile, options);
  // Expected-first argument order.
  Assert.assertEquals(3, listing.getNumberOfPaths());
  // Verify the listing entries and their order.
  SequenceFile.Reader reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listingFile));
  CopyListingFileStatus fileStatus = new CopyListingFileStatus();
  Text relativePath = new Text();
  Assert.assertTrue(reader.next(relativePath, fileStatus));
  Assert.assertEquals("/1", relativePath.toString());
  Assert.assertTrue(reader.next(relativePath, fileStatus));
  Assert.assertEquals("/1/file", relativePath.toString());
  Assert.assertTrue(reader.next(relativePath, fileStatus));
  Assert.assertEquals("/2", relativePath.toString());
  Assert.assertFalse(reader.next(relativePath, fileStatus));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test main method of DistCp. The method is expected to call
 * System.exit(), which the test harness converts to an ExitException;
 * on exit the target must exist, the status must be 0, and the staging
 * directory must have been cleaned up.
 */
@Test public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
  Configuration conf = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt"); // typo fix: was "soure"
  Path target = createFile("target.txt");
  try {
    String[] arg = {target.toString(), source.toString()};
    DistCp.main(arg);
    Assert.fail();
  } catch (ExitException t) {
    Assert.assertTrue(fs.exists(target));
    // Expected-first argument order.
    Assert.assertEquals(0, t.status);
    // Staging directory must be empty after the run.
    Assert.assertEquals(0, stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
  }
}
InternalCallVerifier BooleanVerifier
/**
 * test the run and execute methods of the DistCp class: a simple copy
 * of one file must succeed and create the target.
 * @throws Exception
 */
@Test public void testCleanup() throws Exception {
  Configuration conf = getConf();
  Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
  stagingDir.getFileSystem(conf).mkdirs(stagingDir);
  Path source = createFile("tmp.txt"); // typo fix: was "soure"
  Path target = createFile("target.txt");
  DistCp distcp = new DistCp(conf, null);
  String[] arg = {source.toString(), target.toString()};
  distcp.run(arg);
  Assert.assertTrue(fs.exists(target));
}
BranchVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier
/**
 * Every file archived into a HAR must read back identically through all
 * of HarFileSystem's read paths (simple read, buffered, readFully, seek,
 * single-byte read(4-arg), skip), including a binary file, a zero-length
 * file, and files whose content equals their base name.
 */
@Test public void testReadFileContent() throws Exception {
fileList.add(createFile(inputPath,fs,"c c"));
final Path sub1=new Path(inputPath,"sub 1");
fs.mkdirs(sub1);
// Text files whose content is expected to equal their base name.
fileList.add(createFile(inputPath,fs,sub1.getName(),"file x y z"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"file"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"x"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"y"));
fileList.add(createFile(inputPath,fs,sub1.getName(),"z"));
final Path sub2=new Path(inputPath,"sub 1 with suffix");
fs.mkdirs(sub2);
fileList.add(createFile(inputPath,fs,sub2.getName(),"z"));
// Special cases: binary content and a zero-length file.
final byte[] binContent=prepareBin();
fileList.add(createFile(inputPath,fs,binContent,sub2.getName(),"bin"));
fileList.add(createFile(inputPath,fs,new byte[0],sub2.getName(),"zero-length"));
final String fullHarPathStr=makeArchive();
final HarFileSystem harFileSystem=new HarFileSystem(fs);
try {
final URI harUri=new URI(fullHarPathStr);
harFileSystem.initialize(harUri,fs.getConf());
int readFileCount=0;
for ( final String pathStr0 : fileList) {
final Path path=new Path(fullHarPathStr + Path.SEPARATOR + pathStr0);
final String baseName=path.getName();
final FileStatus status=harFileSystem.getFileStatus(path);
if (status.isFile()) {
// All read strategies must agree byte-for-byte with the simple read.
final byte[] actualContentSimple=readAllSimple(harFileSystem.open(path),true);
final byte[] actualContentBuffer=readAllWithBuffer(harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentBuffer);
final byte[] actualContentFully=readAllWithReadFully(actualContentSimple.length,harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentFully);
final byte[] actualContentSeek=readAllWithSeek(actualContentSimple.length,harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentSeek);
final byte[] actualContentRead4=readAllWithRead4(harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentRead4);
final byte[] actualContentSkip=readAllWithSkip(actualContentSimple.length,harFileSystem.open(path),harFileSystem.open(path),true);
assertArrayEquals(actualContentSimple,actualContentSkip);
if ("bin".equals(baseName)) {
assertArrayEquals(binContent,actualContentSimple);
}
 else if ("zero-length".equals(baseName)) {
assertEquals(0,actualContentSimple.length);
}
 else {
// By construction, each text file's content is its base name.
String actual=new String(actualContentSimple,"UTF-8");
assertEquals(baseName,actual);
}
readFileCount++;
}
}
// Every archived file must have been visited as a regular file.
assertEquals(fileList.size(),readFileCount);
}
  finally {
harFileSystem.close();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * HarFileSystem.copyToLocalFile must copy a single archived file ("a",
 * one byte long) out of the HAR into the local filesystem.
 */
@Test public void testCopyToLocal() throws Exception {
final String fullHarPathStr=makeArchive();
// Scratch directory on the local filesystem for the copied file.
final String tmpDir=System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
final Path tmpPath=new Path(tmpDir);
final LocalFileSystem localFs=FileSystem.getLocal(new Configuration());
localFs.delete(tmpPath,true);
localFs.mkdirs(tmpPath);
assertTrue(localFs.exists(tmpPath));
final HarFileSystem harFileSystem=new HarFileSystem(fs);
try {
final URI harUri=new URI(fullHarPathStr);
harFileSystem.initialize(harUri,fs.getConf());
final Path sourcePath=new Path(fullHarPathStr + Path.SEPARATOR + "a");
final Path targetPath=new Path(tmpPath,"straus");
harFileSystem.copyToLocalFile(false,sourcePath,targetPath);
FileStatus straus=localFs.getFileStatus(targetPath);
// The archived file "a" is one byte long.
assertEquals(1,straus.getLen());
}
  finally {
harFileSystem.close();
localFs.delete(tmpPath,true);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Archiving paths that contain spaces must preserve the directory
 * listing: an lsr of the original tree and of the HAR must be equal.
 */
@Test public void testPathWithSpaces() throws Exception {
  createFile(inputPath, fs, "c c");
  final Path sub1 = new Path(inputPath, "sub 1");
  fs.mkdirs(sub1);
  // Populate "sub 1" with several files, one of them space-containing.
  for (String name : new String[]{"file x y z", "file", "x", "y", "z"}) {
    createFile(sub1, fs, name);
  }
  final Path sub2 = new Path(inputPath, "sub 1 with suffix");
  fs.mkdirs(sub2);
  createFile(sub2, fs, "z");
  final FsShell shell = new FsShell(conf);
  final String inputPathStr = inputPath.toUri().getPath();
  final List originalPaths = lsr(shell, inputPathStr);
  // Archive the tree and list the archive's contents.
  final String fullHarPathStr = makeArchive();
  final List harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A DistCp execution that fails (source URI has an unknown scheme) must
 * still clean up the job staging directory.
 */
@Test(timeout=100000) public void testCleanup(){
  try {
    Path sourcePath = new Path("noscheme:///file");
    List sources = new ArrayList();
    sources.add(sourcePath);
    DistCpOptions options = new DistCpOptions(sources, target);
    Configuration conf = getConf();
    Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
    stagingDir.getFileSystem(conf).mkdirs(stagingDir);
    try {
      new DistCp(conf, options).execute();
    } catch (Throwable t) {
      // Execution is expected to fail; the staging dir must be empty.
      // Expected-first argument order.
      Assert.assertEquals(0, stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("testCleanup failed " + e.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test JMX connection to DataNode: a live DataNode must expose its
 * BytesWritten metric over JMX, and after cluster shutdown no DataNode
 * MBeans may remain registered.
 * @throws Exception
 */
@Test public void testDataNode() throws Exception {
int numDatanodes=2;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Write a file so the DataNodes record some bytes written.
DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test"),fileSize,fileSize,blockSize,(short)2,seed);
JMXGet jmx=new JMXGet();
String serviceName="DataNode";
jmx.setService(serviceName);
jmx.init();
assertEquals(fileSize,Integer.parseInt(jmx.getValue("BytesWritten")));
cluster.shutdown();
// After shutdown, all DataNode MBeans must have been unregistered.
MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer();
ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*");
Set names=mbsc.queryNames(query,null);
assertTrue("No beans should be registered for " + serviceName,names.isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * test JMX connection to NameNode: a live NameNode must expose its
 * metrics (live datanodes, corrupt blocks, open connections) over JMX,
 * and after cluster shutdown no NameNode MBeans may remain registered.
 * @throws Exception
 */
@Test public void testNameNode() throws Exception {
int numDatanodes=2;
cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build();
cluster.waitActive();
// Create a file so the NameNode has some activity to report.
DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test1"),fileSize,fileSize,blockSize,(short)2,seed);
JMXGet jmx=new JMXGet();
String serviceName="NameNode";
jmx.setService(serviceName);
jmx.init();
assertTrue("error printAllValues",checkPrintAllValues(jmx));
assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumLiveDataNodes")));
// JMX value must agree with the FSNamesystem metrics source.
assertGauge("CorruptBlocks",Long.parseLong(jmx.getValue("CorruptBlocks")),getMetrics("FSNamesystem"));
assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumOpenConnections")));
cluster.shutdown();
// After shutdown, all NameNode MBeans must have been unregistered.
MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer();
ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*");
Set names=mbsc.queryNames(query,null);
assertTrue("No beans should be registered for " + serviceName,names.isEmpty());
}
InternalCallVerifier BooleanVerifier
/**
 * OptionsParser: -skipcrccheck is off by default and enabled (together
 * with sync-folder) when passed alongside -update.
 */
@Test public void testParseSkipCRC(){
  final String src = "hdfs://localhost:8020/source/first";
  final String dst = "hdfs://localhost:8020/target/";

  DistCpOptions options = OptionsParser.parse(new String[]{src, dst});
  Assert.assertFalse(options.shouldSkipCRC());

  options = OptionsParser.parse(new String[]{"-update", "-skipcrccheck", src, dst});
  Assert.assertTrue(options.shouldSyncFolder());
  Assert.assertTrue(options.shouldSkipCRC());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * OptionsParser: -delete is off by default, allowed with -update or
 * -overwrite, and rejected with -atomic.
 */
@Test public void testParseDeleteMissing(){
DistCpOptions options=OptionsParser.parse(new String[]{"hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
assertFalse(options.shouldDeleteMissing());
options=OptionsParser.parse(new String[]{"-update","-delete","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldSyncFolder());
Assert.assertTrue(options.shouldDeleteMissing());
options=OptionsParser.parse(new String[]{"-overwrite","-delete","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
Assert.assertTrue(options.shouldOverwrite());
Assert.assertTrue(options.shouldDeleteMissing());
// -atomic and -delete are mutually exclusive.
try {
OptionsParser.parse(new String[]{"-atomic","-delete","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"});
Assert.fail("Atomic and delete folders were allowed");
}
catch ( IllegalArgumentException ignore) {
// expected: invalid combination
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the -p (preserve-status) switch family: nothing is preserved by
 * default, bare "-p" preserves the default six attributes (but not ACLs or
 * XAttrs), explicit letter subsets preserve exactly the named attributes,
 * invalid letters are rejected, and preserve() can be applied
 * programmatically and is idempotent.
 *
 * Fix: the attribute-count assertion previously passed (actual, expected)
 * to JUnit's assertEquals; the argument order is corrected.
 */
@Test public void testPreserve(){
  // No -p flag: nothing should be preserved.
  DistCpOptions options = OptionsParser.parse(new String[]{"-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));

  // Bare -p with -f: the default set, but never ACLs or XAttrs.
  options = OptionsParser.parse(new String[]{"-p", "-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // Bare -p without -f behaves the same.
  options = OptionsParser.parse(new String[]{"-p",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // -pbr: blocksize and replication only.
  options = OptionsParser.parse(new String[]{"-pbr", "-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // -pbrgup: everything except checksum type, ACLs, XAttrs.
  options = OptionsParser.parse(new String[]{"-pbrgup", "-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // -pbrgupcax: all eight attributes.
  options = OptionsParser.parse(new String[]{"-pbrgupcax", "-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.XATTR));

  // -pc: checksum type only.
  options = OptionsParser.parse(new String[]{"-pc", "-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
  Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
  Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR));

  // Bare -p should enumerate exactly six preserved attributes.
  options = OptionsParser.parse(new String[]{"-p", "-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  int i = 0;
  Iterator attribIterator = options.preserveAttributes();
  while (attribIterator.hasNext()) {
    attribIterator.next();
    i++;
  }
  // Fixed argument order: JUnit assertEquals expects (expected, actual).
  Assert.assertEquals(6, i);

  // Unknown attribute letters must be rejected.
  try {
    OptionsParser.parse(new String[]{"-pabcd", "-f",
        "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target"});
    Assert.fail("Invalid preserve attribute");
  } catch (IllegalArgumentException ignore) {
    // expected
  } catch (NoSuchElementException ignore) {
    // also acceptable, depending on how the parser walks the letters
  }

  // Programmatic preserve() works and is idempotent.
  options = OptionsParser.parse(new String[]{"-f",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
  options.preserve(FileAttribute.PERMISSION);
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
  options.preserve(FileAttribute.PERMISSION);
  Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * appendToConf must publish parsed switches into the Configuration: the
 * boolean switches, the packed preserve-status string, and the bandwidth
 * value (defaulting to DEFAULT_BANDWIDTH_MB when not supplied).
 *
 * Fix: assertEquals calls previously passed (actual, expected); JUnit's
 * contract is (expected, actual), and assertNull replaces the
 * assertEquals(..., null) form.
 */
@Test public void testOptionsAppendToConf(){
  Configuration conf = new Configuration();
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
  DistCpOptions options = OptionsParser.parse(new String[]{"-atomic", "-i",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
  // No -bandwidth supplied: the default must be appended.
  Assert.assertEquals(DistCpConstants.DEFAULT_BANDWIDTH_MB,
      conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1));

  conf = new Configuration();
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
  Assert.assertNull(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
  options = OptionsParser.parse(new String[]{"-update", "-delete", "-pu", "-bandwidth", "11",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
  // -pu packs to the single attribute letter "U" (user).
  Assert.assertEquals("U", conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
  Assert.assertEquals(11, conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * DistCpOptionSwitch.addToConf must set the switch's config label, which is
 * absent on a fresh Configuration, to a true boolean.
 */
@Test public void testOptionsSwitchAddToConf(){
  Configuration conf = new Configuration();
  String label = DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel();
  Assert.assertNull(conf.get(label));
  DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.ATOMIC_COMMIT);
  Assert.assertTrue(conf.getBoolean(label, false));
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * -append requires -update and is incompatible with -skipcrccheck; the
 * happy path must publish both APPEND and SYNC_FOLDERS into the conf.
 *
 * Fix: the parse results inside the failure-path try blocks were assigned
 * to 'options' but never read (dead stores); the assignments are dropped.
 */
@Test public void testAppendOption(){
  Configuration conf = new Configuration();
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), false));
  Assert.assertFalse(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));

  DistCpOptions options = OptionsParser.parse(new String[]{"-update", "-append",
      "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
  options.appendToConf(conf);
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), false));
  Assert.assertTrue(conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));

  // -append without -update must be rejected.
  try {
    OptionsParser.parse(new String[]{"-append",
        "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
    fail("Append should fail if update option is not specified");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Append is valid only with update options", e);
  }

  // -append combined with -skipcrccheck must also be rejected.
  try {
    OptionsParser.parse(new String[]{"-append", "-update", "-skipcrccheck",
        "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"});
    fail("Append should fail if skipCrc option is specified");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Append is disallowed when skipping CRC", e);
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Sync + delete-missing over flat directories with interleaved file names.
 * Source holds files 1,3,4,5,7,8,9; target holds 2,4,5,7,9,A. Committing
 * the job runs the delete-missing phase, which removes the target-only
 * entries (2 and A), leaving four files; a second commit must be an
 * idempotent no-op.
 *
 * Fix: the two assertEquals calls previously passed (actual, expected);
 * JUnit's contract is (expected, actual).
 */
@Test public void testDeleteMissingFlatInterleavedFiles(){
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();
  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    sourceBase = "/tmp1/" + String.valueOf(rand.nextLong());
    targetBase = "/tmp1/" + String.valueOf(rand.nextLong());
    TestDistCpUtils.createFile(fs, sourceBase + "/1");
    TestDistCpUtils.createFile(fs, sourceBase + "/3");
    TestDistCpUtils.createFile(fs, sourceBase + "/4");
    TestDistCpUtils.createFile(fs, sourceBase + "/5");
    TestDistCpUtils.createFile(fs, sourceBase + "/7");
    TestDistCpUtils.createFile(fs, sourceBase + "/8");
    TestDistCpUtils.createFile(fs, sourceBase + "/9");
    TestDistCpUtils.createFile(fs, targetBase + "/2");
    TestDistCpUtils.createFile(fs, targetBase + "/4");
    TestDistCpUtils.createFile(fs, targetBase + "/5");
    TestDistCpUtils.createFile(fs, targetBase + "/7");
    TestDistCpUtils.createFile(fs, targetBase + "/9");
    TestDistCpUtils.createFile(fs, targetBase + "/A");
    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.setSyncFolder(true);
    options.setDeleteMissing(true);
    options.appendToConf(conf);
    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
    // Target-only files /2 and /A are deleted; 4 of the original 6 remain.
    Assert.assertEquals(4, fs.listStatus(new Path(targetBase)).length);

    // A repeated commit must leave the target unchanged.
    committer.commitJob(jobContext);
    if (!TestDistCpUtils.checkIfFoldersAreInSync(fs, targetBase, sourceBase)) {
      Assert.fail("Source and target folders are not in sync");
    }
    Assert.assertEquals(4, fs.listStatus(new Path(targetBase)).length);
  } catch (IOException e) {
    LOG.error("Exception encountered while testing for delete missing", e);
    Assert.fail("Delete missing failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Atomic commit must refuse to overwrite a pre-existing final path: the
 * commit is expected to throw, and both the work and final directories must
 * survive untouched.
 */
@Test public void testAtomicCommitExistingFinal(){
TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config);
JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID());
Configuration conf=jobContext.getConfiguration();
// Random sibling directories under /tmp1 for the work and final locations.
String workPath="/tmp1/" + String.valueOf(rand.nextLong());
String finalPath="/tmp1/" + String.valueOf(rand.nextLong());
FileSystem fs=null;
try {
OutputCommitter committer=new CopyCommitter(null,taskAttemptContext);
fs=FileSystem.get(conf);
// Create BOTH directories so the final path already exists at commit time.
fs.mkdirs(new Path(workPath));
fs.mkdirs(new Path(finalPath));
conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,workPath);
conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,true);
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
try {
committer.commitJob(jobContext);
Assert.fail("Should not be able to atomic-commit to pre-existing path.");
}
 catch ( Exception exception) {
// Expected path: commit rejected, neither directory disturbed.
Assert.assertTrue(fs.exists(new Path(workPath)));
Assert.assertTrue(fs.exists(new Path(finalPath)));
LOG.info("Atomic-commit Test pass.");
}
}
 catch ( IOException e) {
LOG.error("Exception encountered while testing for atomic commit.",e);
Assert.fail("Atomic commit failure");
}
 finally {
// Best-effort cleanup, and reset the atomic-copy flag on the shared conf.
TestDistCpUtils.delete(fs,workPath);
TestDistCpUtils.delete(fs,finalPath);
conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY,false);
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Atomic commit when the final path does not yet exist: after commit the
 * work directory is gone and the final path exists; a second commit must be
 * an idempotent no-op.
 *
 * Fix: the IOException log message was copy-pasted from a preserve-status
 * test; it now describes the atomic-commit scenario actually under test.
 */
@Test public void testAtomicCommitMissingFinal(){
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();
  String workPath = "/tmp1/" + String.valueOf(rand.nextLong());
  String finalPath = "/tmp1/" + String.valueOf(rand.nextLong());
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    // Only the work directory exists; the final path is deliberately absent.
    fs.mkdirs(new Path(workPath));
    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workPath);
    conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, true);
    Assert.assertTrue(fs.exists(new Path(workPath)));
    Assert.assertFalse(fs.exists(new Path(finalPath)));

    committer.commitJob(jobContext);
    // The work directory should have been moved to the final location.
    Assert.assertFalse(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));

    // A repeated commit must leave the state unchanged.
    committer.commitJob(jobContext);
    Assert.assertFalse(fs.exists(new Path(workPath)));
    Assert.assertTrue(fs.exists(new Path(finalPath)));
  } catch (IOException e) {
    LOG.error("Exception encountered while testing for atomic commit.", e);
    Assert.fail("Atomic commit failure");
  } finally {
    // Best-effort cleanup, and reset the atomic-copy flag on the shared conf.
    TestDistCpUtils.delete(fs, workPath);
    TestDistCpUtils.delete(fs, finalPath);
    conf.setBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false);
  }
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * With no work/final paths configured, commitJob performs no file-system
 * action and simply marks the task status; repeated commits are no-ops.
 *
 * Fix: assertEquals previously passed (actual, expected); JUnit's contract
 * is (expected, actual).
 */
@Test public void testNoCommitAction(){
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    committer.commitJob(jobContext);
    Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
    // Committing again must succeed and report the same status.
    committer.commitJob(jobContext);
    Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Commit failed");
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Copies once, appends to the source files, then re-runs the mapper in
 * append mode over every path and verifies the copy plus the BYTESCOPIED
 * and COPY counters.
 */
@Test public void testCopyWithAppend() throws Exception {
  final FileSystem fs = cluster.getFileSystem();
  testCopy(false);
  appendSourceData();

  CopyMapper mapper = new CopyMapper();
  StubContext stub = new StubContext(getConfiguration(), null, 0);
  Mapper.Context mapperContext = stub.getContext();
  mapperContext.getConfiguration().setBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), true);
  mapper.setup(mapperContext);

  for (Path path : pathList) {
    Text relPath = new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path));
    CopyListingFileStatus status =
        new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(path));
    mapper.map(relPath, status, mapperContext);
  }

  verifyCopy(fs, false);
  Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE * 2,
      stub.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
  Assert.assertEquals(pathList.size(),
      stub.getReporter().getCounter(CopyMapper.Counter.COPY).getValue());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the mapper runs as an unprivileged user ("guest") and both source and
 * target files are read-only with identical content, the copy must be
 * reported as a SKIP rather than attempted (and failing on permissions).
 */
@Test(timeout=40000) public void testSkipCopyNoPerms(){
try {
deleteState();
createSourceData();
UserGroupInformation tmpUser=UserGroupInformation.createRemoteUser("guest");
final CopyMapper copyMapper=new CopyMapper();
// Build the mapper context as the unprivileged user.
final StubContext stubContext=tmpUser.doAs(new PrivilegedAction(){
@Override public StubContext run(){
try {
return new StubContext(getConfiguration(),null,0);
}
catch ( Exception e) {
LOG.error("Exception encountered ",e);
throw new RuntimeException(e);
}
}
}
);
final Mapper.Context context=stubContext.getContext();
// Preserve everything except ACLs and XAttrs.
EnumSet preserveStatus=EnumSet.allOf(DistCpOptions.FileAttribute.class);
preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,DistCpUtils.packAttributes(preserveStatus));
// Identical read-only (r--r--r--) files on both sides.
touchFile(SOURCE_PATH + "/src/file");
touchFile(TARGET_PATH + "/src/file");
cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ));
cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ));
// File system handle obtained under the unprivileged user's credentials.
final FileSystem tmpFS=tmpUser.doAs(new PrivilegedAction(){
@Override public FileSystem run(){
try {
return FileSystem.get(configuration);
}
catch ( IOException e) {
LOG.error("Exception encountered ",e);
Assert.fail("Test failed: " + e.getMessage());
throw new RuntimeException("Test ought to fail here");
}
}
}
);
// Run the mapper as "guest" and expect a single SKIP record for the file.
tmpUser.doAs(new PrivilegedAction(){
@Override public Integer run(){
try {
copyMapper.setup(context);
copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context);
Assert.assertEquals(stubContext.getWriter().values().size(),1);
Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP"));
Assert.assertTrue(stubContext.getWriter().values().get(0).toString().contains(SOURCE_PATH + "/src/file"));
}
catch ( Exception e) {
throw new RuntimeException(e);
}
return null;
}
}
);
}
catch ( Exception e) {
LOG.error("Exception encountered ",e);
Assert.fail("Test failed: " + e.getMessage());
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * If a single file is being copied to a location where the file (of the same
 * name) already exists, then the file shouldn't be skipped: when the final
 * path names the parent directory the copy is skipped, but when it names the
 * file itself the target must be overwritten.
 *
 * Fixes: exception.printStackTrace() was unreachable (placed after
 * Assert.fail(), which throws); and the literal path substitution now uses
 * replace() instead of replaceAll(), which treats its pattern as a regex.
 */
@Test(timeout=40000) public void testSingleFileCopy(){
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    // Literal substring swap: replaceAll() would interpret SOURCE_PATH as a regex.
    Path targetFilePath = new Path(
        sourceFilePath.toString().replace(SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper.Context context = stubContext.getContext();

    // Final path = parent directory: the copy should be skipped.
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.getParent().toString());
    copyMapper.setup(context);
    final CopyListingFileStatus sourceFileStatus =
        new CopyListingFileStatus(fs.getFileStatus(sourceFilePath));
    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
        sourceFilePath)), sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been skipped", before == after);

    // Final path = the file itself: the copy must overwrite the target.
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.toString());
    copyMapper.setup(context);
    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try {
      Thread.sleep(2); // let the modification time advance
    } catch (Throwable ignore) {
    }
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
        sourceFilePath)), sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();
    Assert.assertTrue("File should have been overwritten.", before < after);
  } catch (Exception exception) {
    // Print the trace BEFORE failing; fail() throws, so anything after it
    // never executes.
    exception.printStackTrace();
    Assert.fail("Unexpected exception: " + exception.getMessage());
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Working-directory accessors: unset and empty configurations both read
 * back as null, and a set/get round-trip preserves the path (also visible
 * as the raw config value).
 */
@Test public void testSetWorkingDirectory(){
  try {
    Job job = Job.getInstance(new Configuration());
    Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));

    job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, "");
    Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));

    Path workDir = new Path("/tmp/test");
    CopyOutputFormat.setWorkingDirectory(job, workDir);
    Assert.assertEquals(workDir, CopyOutputFormat.getWorkingDirectory(job));
    Assert.assertEquals(workDir.toString(),
        job.getConfiguration().get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
  } catch (IOException e) {
    LOG.error("Exception encountered while running test", e);
    Assert.fail("Failed while testing for set Working Directory");
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Commit-directory accessors: unset and empty configurations both read
 * back as null, and a set/get round-trip preserves the path (also visible
 * as the raw config value).
 */
@Test public void testSetCommitDirectory(){
  try {
    Job job = Job.getInstance(new Configuration());
    Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));

    job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, "");
    Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));

    Path commitDir = new Path("/tmp/test");
    CopyOutputFormat.setCommitDirectory(job, commitDir);
    Assert.assertEquals(commitDir, CopyOutputFormat.getCommitDirectory(job));
    Assert.assertEquals(commitDir.toString(),
        job.getConfiguration().get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  } catch (IOException e) {
    LOG.error("Exception encountered while running test", e);
    Assert.fail("Failed while testing for set Commit Directory");
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/** CopyOutputFormat must hand back a CopyCommitter as its output committer. */
@Test public void testGetOutputCommitter(){
  try {
    TaskAttemptID attemptId = new TaskAttemptID("200707121733", 1, TaskType.MAP, 1, 1);
    TaskAttemptContext context = new TaskAttemptContextImpl(new Configuration(), attemptId);
    context.getConfiguration().set("mapred.output.dir", "/out");
    Assert.assertTrue(
        new CopyOutputFormat().getOutputCommitter(context) instanceof CopyCommitter);
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Unable to get output committer");
  }
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Builds a copy listing, asks DynamicInputFormat for splits, then drains a
 * record reader per split: every source path read must be expected, the
 * reader's progress must be monotonically non-decreasing within [0,1] and
 * end at exactly 1.0, and the total file count must match the listing.
 */
@Test public void testGetSplits() throws Exception {
DistCpOptions options=getOptions();
Configuration configuration=new Configuration();
// Cap the map count so the listing is chunked for the configured task count.
configuration.set("mapred.map.tasks",String.valueOf(options.getMaxMaps()));
CopyListing.getCopyListing(configuration,CREDENTIALS,options).buildListing(new Path(cluster.getFileSystem().getUri().toString() + "/tmp/testDynInputFormat/fileList.seq"),options);
JobContext jobContext=new JobContextImpl(configuration,new JobID());
DynamicInputFormat inputFormat=new DynamicInputFormat();
List splits=inputFormat.getSplits(jobContext);
int nFiles=0;
int taskId=0;
for ( InputSplit split : splits) {
RecordReader recordReader=inputFormat.createRecordReader(split,null);
StubContext stubContext=new StubContext(jobContext.getConfiguration(),recordReader,taskId);
final TaskAttemptContext taskAttemptContext=stubContext.getContext();
// NOTE(review): initialize() is given splits.get(0) rather than the loop's
// 'split'; presumably harmless because dynamic-input readers pull chunks
// on demand rather than from the split itself — confirm intent.
recordReader.initialize(splits.get(0),taskAttemptContext);
float previousProgressValue=0f;
while (recordReader.nextKeyValue()) {
CopyListingFileStatus fileStatus=recordReader.getCurrentValue();
String source=fileStatus.getPath().toString();
System.out.println(source);
Assert.assertTrue(expectedFilePaths.contains(source));
// Progress must never regress and must stay within [0, 1].
final float progress=recordReader.getProgress();
Assert.assertTrue(progress >= previousProgressValue);
Assert.assertTrue(progress >= 0.0f);
Assert.assertTrue(progress <= 1.0f);
previousProgressValue=progress;
++nFiles;
}
// A drained reader reports exactly 100% progress.
Assert.assertTrue(recordReader.getProgress() == 1.0f);
++taskId;
}
// Every expected file was seen exactly once across all splits.
Assert.assertEquals(expectedFilePaths.size(),nFiles);
}
InternalCallVerifier EqualityVerifier
/**
 * getSplitRatio: the two-arg overload uses built-in defaults; the
 * Configuration overload falls back to those same defaults when every
 * setting is negative, and honours an explicit positive split ratio.
 */
@Test public void testGetSplitRatio() throws Exception {
  // Defaults — no configuration involved.
  Assert.assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000));
  Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10));
  Assert.assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700));
  Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200));

  // Negative settings must behave exactly like the defaults.
  Configuration ratioConf = new Configuration();
  ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, -1);
  ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, -1);
  ratioConf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, -1);
  ratioConf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, -1);
  Assert.assertEquals(1, DynamicInputFormat.getSplitRatio(1, 1000000000, ratioConf));
  Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(11000000, 10, ratioConf));
  Assert.assertEquals(4, DynamicInputFormat.getSplitRatio(30, 700, ratioConf));
  Assert.assertEquals(2, DynamicInputFormat.getSplitRatio(30, 200, ratioConf));

  // An explicitly configured split ratio wins.
  ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE, 100);
  ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL, 30);
  ratioConf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK, 10);
  ratioConf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO, 53);
  Assert.assertEquals(53, DynamicInputFormat.getSplitRatio(3, 200, ratioConf));
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * There should be files in the directory named by
 * ${test.build.data}/rumen/histogram-tests, in pairs: inputXxx.json and
 * goldXxx.json. Each input file is read as a HistogramRawTestData in JSON;
 * a Histogram is built from its data field and a LoggedDiscreteCDF from its
 * percentiles and scale fields; the result is deepCompare()d against the
 * corresponding goldXxx.json read as a LoggedDiscreteCDF.
 *
 * Fix: corrected the assertion-message typo "dies not exist", and repaired
 * the malformed javadoc (the @throws tag had fused into the prose).
 *
 * @throws IOException if the test data cannot be read
 */
@Test public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir =
      new Path(System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");

  FileStatus[] tests = lfs.listStatus(rootInputFile);
  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold" + testName);
      assertTrue("Gold file does not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser parser =
          new JsonObjectMapperParser(goldStream, LoggedDiscreteCDF.class);
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, ""));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      } finally {
        parser.close();
      }
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Draws a million samples from a CDFPiecewiseLinearRandomGenerator seeded
 * with a three-point CDF (min 100000, max 1100000; 200000@0.1, 800000@0.5,
 * 1000000@0.9), recovers the empirical CDF via a Histogram, and checks that
 * the RMS relative error per bucket stays within maximumRelativeError.
 */
@Test public void testOneRun(){
LoggedDiscreteCDF input=new LoggedDiscreteCDF();
input.setMinimum(100000L);
input.setMaximum(1100000L);
ArrayList rankings=new ArrayList();
rankings.add(makeRR(0.1,200000L));
rankings.add(makeRR(0.5,800000L));
rankings.add(makeRR(0.9,1000000L));
input.setRankings(rankings);
input.setNumberValues(3);
CDFRandomGenerator gen=new CDFPiecewiseLinearRandomGenerator(input);
Histogram values=new Histogram();
// One million draws for a stable empirical distribution.
for (int i=0; i < 1000000; ++i) {
long value=gen.randomValue();
values.enter(value);
}
// Percentiles 1..99.
int[] percentiles=new int[99];
for (int i=0; i < 99; ++i) {
percentiles[i]=i + 1;
}
// NOTE(review): the loops below index result up to [100], so getCDF is
// assumed to return min and max endpoints in addition to the 99 requested
// percentiles (101 entries) — TODO confirm against Histogram.getCDF.
long[] result=values.getCDF(100,percentiles);
long sumErrorSquares=0L;
// Piecewise expectations follow the three linear segments of the input CDF.
// Segment 1 (0-10th percentile): slope 10000 per percentile from 100000.
for (int i=0; i < 10; ++i) {
long error=result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (10000L * i + 100000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Segment 2 (10th-50th): slope 15000 per percentile.
for (int i=10; i < 50; ++i) {
long error=result[i] - (15000L * i + 50000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (15000L * i + 50000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Segment 3 (50th-90th): slope 5000 per percentile.
for (int i=50; i < 90; ++i) {
long error=result[i] - (5000L * i + 550000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (5000L * i + 550000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Segment 4 (90th-100th): slope 10000 per percentile again.
for (int i=90; i <= 100; ++i) {
long error=result[i] - (10000L * i + 100000L);
System.out.println("element " + i + ", got "+ result[i]+ ", expected "+ (10000L * i + 100000L)+ ", error = "+ error);
sumErrorSquares+=error * error;
}
// Normalise against the middle ranking's datum and take the root.
double realSumErrorSquares=(double)sumErrorSquares;
double normalizedError=realSumErrorSquares / 100 / rankings.get(1).getDatum()/ rankings.get(1).getDatum();
double RMSNormalizedError=Math.sqrt(normalizedError);
System.out.println("sumErrorSquares = " + sumErrorSquares);
System.out.println("normalizedError: " + normalizedError + ", RMSNormalizedError: "+ RMSNormalizedError);
System.out.println("Cumulative error is " + RMSNormalizedError);
assertTrue("The RMS relative error per bucket, " + RMSNormalizedError + ", exceeds our tolerance of "+ maximumRelativeError,RMSNormalizedError <= maximumRelativeError);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * DistCpUtils.preserve must copy only the attributes named in the set:
 * nothing with an empty set, permission only when PERMISSION is added,
 * then ownership once USER and GROUP are added.
 *
 * Fix: assertEquals calls previously passed (actual, expected); JUnit's
 * contract is (expected, actual). The EnumSet is also parameterized.
 */
@Test public void testPreserve(){
  try {
    FileSystem fs = FileSystem.get(config);
    EnumSet<FileAttribute> attributes = EnumSet.noneOf(FileAttribute.class);
    Path path = new Path("/tmp/abc");
    Path src = new Path("/tmp/src");
    fs.mkdirs(path);
    fs.mkdirs(src);
    CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

    FsPermission noPerm = new FsPermission((short) 0);
    fs.setPermission(path, noPerm);
    fs.setOwner(path, "nobody", "nobody");

    // Empty attribute set: nothing may change.
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    FileStatus target = fs.getFileStatus(path);
    Assert.assertEquals(noPerm, target.getPermission());
    Assert.assertEquals("nobody", target.getOwner());
    Assert.assertEquals("nobody", target.getGroup());

    // PERMISSION only: permission copied, ownership untouched.
    attributes.add(FileAttribute.PERMISSION);
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    target = fs.getFileStatus(path);
    Assert.assertEquals(srcStatus.getPermission(), target.getPermission());
    Assert.assertEquals("nobody", target.getOwner());
    Assert.assertEquals("nobody", target.getGroup());

    // GROUP and USER added: ownership copied as well.
    attributes.add(FileAttribute.GROUP);
    attributes.add(FileAttribute.USER);
    DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
    target = fs.getFileStatus(path);
    Assert.assertEquals(srcStatus.getPermission(), target.getPermission());
    Assert.assertEquals(srcStatus.getOwner(), target.getOwner());
    Assert.assertEquals(srcStatus.getGroup(), target.getGroup());

    fs.delete(path, true);
    fs.delete(src, true);
  } catch (IOException e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Preserve test failure");
  }
}
InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * A buffer returned to the pool must come back cleared: the same instance
 * is recycled with its position reset so all 100 bytes are available again.
 */
@Test public void testBuffersAreReset(){
  ByteBuffer first = pool.getBuffer(100);
  first.putInt(0xdeadbeef);
  // Writing four bytes consumes them from the remaining count.
  assertEquals(96, first.remaining());
  pool.returnBuffer(first);

  ByteBuffer second = pool.getBuffer(100);
  assertSame(first, second);
  assertEquals(100, first.remaining());
  pool.returnBuffer(second);
}
InternalCallVerifier EqualityVerifier
/**
 * Buffers held only by the pool should be collectable: after dropping all
 * strong references and prompting GC, the pool is expected to report zero
 * pooled buffers of that size once a fresh one has been requested.
 */
@Test public void testWeakRefClearing(){
  List bufs = Lists.newLinkedList();
  for (int i = 0; i < 10; i++) {
    bufs.add(pool.getBuffer(100));
  }
  for (Object pooled : bufs) {
    pool.returnBuffer((ByteBuffer) pooled);
  }
  assertEquals(10, pool.countBuffersOfSize(100));

  // Drop every strong reference and nudge the collector a few times.
  bufs.clear();
  bufs = null;
  for (int attempt = 0; attempt < 3; attempt++) {
    System.gc();
  }

  ByteBuffer fresh = pool.getBuffer(100);
  assertEquals(0, pool.countBuffersOfSize(100));
  pool.returnBuffer(fresh);
}
InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Basic pool contract: a fresh buffer has the requested size, a returned
 * buffer is recycled for the next same-sized request, and draining the
 * pool yields a distinct new buffer.
 */
@Test public void testBasics(){
ByteBuffer first=pool.getBuffer(100);
assertEquals(100,first.capacity());
assertEquals(100,first.remaining());
pool.returnBuffer(first);
// Same-sized request is served by recycling the instance just returned.
ByteBuffer recycled=pool.getBuffer(100);
assertSame(first,recycled);
// With the pool drained, a further request allocates a brand-new buffer.
ByteBuffer extra=pool.getBuffer(100);
assertNotSame(recycled,extra);
pool.returnBuffer(recycled);
pool.returnBuffer(extra);
}
InternalCallVerifier BooleanVerifier
/**
 * Writes one bare IP and one /23 subnet to the list file, then checks
 * that addresses inside the entries match and neighbours just outside
 * do not.
 */
@Test public void testSubnetsAndIPs() throws IOException {
String[] ips={"10.119.103.112","10.221.102.0/23"};
createFileWithEntries("ips.txt",ips);
IPList ipList=new FileBasedIPList("ips.txt");
// The bare IP and addresses throughout the /23 range must be included.
for ( String included : new String[]{"10.119.103.112","10.221.102.0","10.221.102.1","10.221.103.1","10.221.103.255"}) {
assertTrue(included + " is not in the list",ipList.isIn(included));
}
// Addresses just outside the entries must be excluded.
for ( String excluded : new String[]{"10.119.103.113","10.221.104.0","10.221.104.1"}) {
assertFalse(excluded + " is in the list",ipList.isIn(excluded));
}
}
InternalCallVerifier BooleanVerifier
/**
 * Writes a mix of bare IPs, a /23 and a /16 subnet, then checks matching
 * addresses are included and near-misses are excluded.
 */
@Test public void testWithMultipleSubnetAndIPs() throws IOException {
String[] ips={"10.119.103.112","10.221.102.0/23","10.222.0.0/16","10.113.221.221"};
createFileWithEntries("ips.txt",ips);
IPList ipList=new FileBasedIPList("ips.txt");
// One representative address per entry must be included.
for ( String included : new String[]{"10.119.103.112","10.221.103.121","10.222.103.121","10.113.221.221"}) {
assertTrue(included + " is not in the list",ipList.isIn(included));
}
// Near-miss addresses outside every entry must be excluded.
for ( String excluded : new String[]{"10.119.103.113","10.221.104.0","10.223.104.0","10.113.221.222"}) {
assertFalse(excluded + " is in the list",ipList.isIn(excluded));
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Include/exclude files containing only comment lines yield empty host sets. */
@Test public void testHostFileReaderWithCommentsOnly() throws Exception {
try (FileWriter efw=new FileWriter(excludesFile)) {
efw.write("#DFS-Hosts-excluded\n");
}
try (FileWriter ifw=new FileWriter(includesFile)) {
ifw.write("#Hosts-in-DFS\n");
}
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
// Comment-only files must produce no hosts at all.
assertEquals(0,hfp.getHosts().size());
assertEquals(0,hfp.getExcludedHosts().size());
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Hosts separated by spaces, tabs and embedded newlines must all parse;
 * anything after '#' on a line is a comment, so somehost5 is ignored.
 */
@Test public void testHostFileReaderWithTabs() throws Exception {
FileWriter efw=new FileWriter(excludesFile);
FileWriter ifw=new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("         \n");
efw.write("   somehost \t somehost2 \n somehost4");
efw.write("   somehost3 \t # somehost5");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("   \n");
ifw.write("   somehost \t somehost2 \n somehost4");
ifw.write("   somehost3 \t # somehost5");
ifw.close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
int includesLen=hfp.getHosts().size();
int excludesLen=hfp.getExcludedHosts().size();
// somehost, somehost2, somehost3, somehost4 — somehost5 is commented out.
assertEquals(4,includesLen);
assertEquals(4,excludesLen);
assertTrue(hfp.getHosts().contains("somehost2"));
assertFalse(hfp.getHosts().contains("somehost5"));
assertTrue(hfp.getExcludedHosts().contains("somehost2"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Space-separated hosts on a single line must all parse; '#' starts a
 * comment, so somehost4 (after the '#') is ignored.
 */
@Test public void testHostFileReaderWithSpaces() throws Exception {
FileWriter efw=new FileWriter(excludesFile);
FileWriter ifw=new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("   somehost somehost2");
efw.write(" somehost3 # somehost4");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("   somehost somehost2");
ifw.write(" somehost3 # somehost4");
ifw.close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
int includesLen=hfp.getHosts().size();
int excludesLen=hfp.getExcludedHosts().size();
// somehost, somehost2, somehost3 — somehost4 is commented out.
assertEquals(3,includesLen);
assertEquals(3,excludesLen);
assertTrue(hfp.getHosts().contains("somehost3"));
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getHosts().contains("somehost4"));
assertTrue(hfp.getExcludedHosts().contains("somehost3"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost4"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Completely empty include/exclude files must produce empty host sets. */
@Test public void testHostFileReaderWithNull() throws Exception {
// Create both files but write nothing into them.
new FileWriter(excludesFile).close();
new FileWriter(includesFile).close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
assertEquals(0,hfp.getHosts().size());
assertEquals(0,hfp.getExcludedHosts().size());
assertFalse(hfp.getHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("somehost5"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end parse of include/exclude files: full-line comments ignored,
 * trailing "# ..." stripped, multiple hosts per line accepted, and a
 * duplicated host (somehost4) counted only once.
 */
@Test public void testHostsFileReader() throws Exception {
FileWriter efw=new FileWriter(excludesFile);
FileWriter ifw=new FileWriter(includesFile);
efw.write("#DFS-Hosts-excluded\n");
efw.write("somehost1\n");
efw.write("#This-is-comment\n");
efw.write("somehost2\n");
efw.write("somehost3 # host3\n");
efw.write("somehost4\n");
efw.write("somehost4 somehost5\n");
efw.close();
ifw.write("#Hosts-in-DFS\n");
ifw.write("somehost1\n");
ifw.write("somehost2\n");
ifw.write("somehost3\n");
ifw.write("#This-is-comment\n");
ifw.write("somehost4 # host4\n");
ifw.write("somehost4 somehost5\n");
ifw.close();
HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
int includesLen=hfp.getHosts().size();
int excludesLen=hfp.getExcludedHosts().size();
// somehost1..somehost5: the duplicate somehost4 appears only once.
assertEquals(5,includesLen);
assertEquals(5,excludesLen);
assertTrue(hfp.getHosts().contains("somehost5"));
// Text after '#' (host3 / host4) must not be picked up as a host.
assertFalse(hfp.getHosts().contains("host3"));
assertTrue(hfp.getExcludedHosts().contains("somehost5"));
assertFalse(hfp.getExcludedHosts().contains("host4"));
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Bulk put/remove on IdentityHashStore: inserts 1000 distinct keys,
 * checks visitAll only sees known keys, removes them all, then verifies
 * the store is empty but keeps the capacity it grew to (the next power
 * of two >= 1000 is 1024, per the final assertion).
 */
@Test(timeout=60000) public void testAdditionsAndRemovals(){
// Generic type arguments restored (the raw declarations did not compile
// cleanly against the @Override accept(Key, Integer) below).
IdentityHashStore<Key, Integer> store=new IdentityHashStore<Key, Integer>(0);
final int NUM_KEYS=1000;
LOG.debug("generating " + NUM_KEYS + " keys");
final List<Key> keys=new ArrayList<Key>(NUM_KEYS);
for (int i=0; i < NUM_KEYS; i++) {
keys.add(new Key("key " + i));
}
for (int i=0; i < NUM_KEYS; i++) {
store.put(keys.get(i),i);
}
store.visitAll(new Visitor<Key, Integer>(){
@Override public void accept( Key k, Integer v){
Assert.assertTrue(keys.contains(k));
}
}
);
for (int i=0; i < NUM_KEYS; i++) {
Assert.assertEquals(Integer.valueOf(i),store.remove(keys.get(i)));
}
store.visitAll(new Visitor<Key, Integer>(){
@Override public void accept( Key k, Integer v){
Assert.fail("expected all entries to be removed");
}
}
);
Assert.assertTrue("expected the store to be " + "empty, but found " + store.numElements() + " elements.",store.isEmpty());
Assert.assertEquals(1024,store.capacity());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A store created with capacity 0 must still work: visitAll sees nothing,
 * the first put grows it, and get/remove of that single entry behave.
 */
@Test(timeout=60000) public void testStartingWithZeroCapacity(){
IdentityHashStore<Key, Integer> store=new IdentityHashStore<Key, Integer>(0);
store.visitAll(new Visitor<Key, Integer>(){
@Override public void accept( Key k, Integer v){
Assert.fail("found key " + k + " in empty IdentityHashStore.");
}
}
);
Assert.assertTrue(store.isEmpty());
final Key key1=new Key("key1");
// Integer.valueOf instead of the deprecated Integer(int) constructor.
Integer value1=Integer.valueOf(100);
store.put(key1,value1);
Assert.assertTrue(!store.isEmpty());
Assert.assertEquals(value1,store.get(key1));
store.visitAll(new Visitor<Key, Integer>(){
@Override public void accept( Key k, Integer v){
Assert.assertEquals(key1,k);
}
}
);
Assert.assertEquals(value1,store.remove(key1));
Assert.assertTrue(store.isEmpty());
}
IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * IdentityHashStore compares keys by reference identity: an equals()-equal
 * but distinct key must miss, while the same key reference can be inserted
 * several times, producing one entry removed per remove() call.
 */
@Test(timeout=60000) public void testDuplicateInserts(){
IdentityHashStore<Key, Integer> store=new IdentityHashStore<Key, Integer>(4);
store.visitAll(new Visitor<Key, Integer>(){
@Override public void accept( Key k, Integer v){
Assert.fail("found key " + k + " in empty IdentityHashStore.");
}
}
);
Assert.assertTrue(store.isEmpty());
Key key1=new Key("key1");
// Integer.valueOf instead of the deprecated Integer(int) constructor.
Integer value1=Integer.valueOf(100);
Integer value2=Integer.valueOf(200);
Integer value3=Integer.valueOf(300);
store.put(key1,value1);
Key equalToKey1=new Key("key1");
// equals()-equal but not the same reference: identity lookup must miss.
Assert.assertNull(store.get(equalToKey1));
Assert.assertTrue(!store.isEmpty());
Assert.assertEquals(value1,store.get(key1));
store.put(key1,value2);
store.put(key1,value3);
final List<Integer> allValues=new LinkedList<Integer>();
store.visitAll(new Visitor<Key, Integer>(){
@Override public void accept( Key k, Integer v){
allValues.add(v);
}
}
);
Assert.assertEquals(3,allValues.size());
for (int i=0; i < 3; i++) {
Integer value=store.remove(key1);
Assert.assertTrue(allValues.remove(value));
}
Assert.assertNull(store.remove(key1));
Assert.assertTrue(store.isEmpty());
}
InternalCallVerifier EqualityVerifier
/**
 * Removing every element through the set's iterator must leave size 0.
 * NOTE(review): the GSet/Iterator declarations are raw types here —
 * presumably generic in the original source; confirm against
 * LightWeightGSet before relying on the exact signatures.
 */
@Test(timeout=60000) public void testRemoveAllViaIterator(){
ArrayList list=getRandomList(100,123);
LightWeightGSet set=new LightWeightGSet(16);
for ( Integer i : list) {
set.put(new TestElement(i));
}
// Iterator.remove is the only safe way to delete while iterating.
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
iter.next();
iter.remove();
}
Assert.assertEquals(0,set.size());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Uses the iterator to remove every element whose value exceeds the mean
 * of all values, then verifies only values &lt;= mean remain.
 */
@Test(timeout=60000) public void testRemoveSomeViaIterator(){
ArrayList list=getRandomList(100,123);
LightWeightGSet set=new LightWeightGSet(16);
for ( Integer i : list) {
set.put(new TestElement(i));
}
long sum=0;
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
sum+=iter.next().getVal();
}
// Despite the name, this is the arithmetic mean, not the statistical mode.
long mode=sum / set.size();
LOG.info("Removing all elements above " + mode);
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
int item=iter.next().getVal();
if (item > mode) {
iter.remove();
}
}
// Everything left must be at or below the threshold.
for (Iterator iter=set.iterator(); iter.hasNext(); ) {
Assert.assertTrue(iter.next().getVal() <= mode);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises LineReader with a custom record delimiter, including a
 * delimiter spanning the internal 64 KiB buffer boundary and partial
 * delimiter matches ("ecord", "recor", "core") at end of input.
 * NOTE(review): Delimiter is set to "" and the tokens look truncated —
 * this block appears to have lost XML-like content (e.g. "&lt;/entity&gt;"
 * tags) during extraction. Verify against the original source; as written,
 * an empty delimiter is almost certainly not what was intended.
 */
@Test public void testCustomDelimiter() throws Exception {
Delimiter="";
String CurrentBufferTailToken="Gelesh";
String NextBufferHeadToken="id>Omathil ";
String Expected=(CurrentBufferTailToken + NextBufferHeadToken).replace(Delimiter,"");
String TestPartOfInput=CurrentBufferTailToken + NextBufferHeadToken;
int BufferSize=64 * 1024;
// Pad so the delimiter straddles the buffer boundary.
int numberOfCharToFillTheBuffer=BufferSize - CurrentBufferTailToken.length();
StringBuilder fillerString=new StringBuilder();
for (int i=0; i < numberOfCharToFillTheBuffer; i++) {
fillerString.append('a');
}
TestData=fillerString + TestPartOfInput;
lineReader=new LineReader(new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
line=new Text();
lineReader.readLine(line);
Assert.assertEquals(fillerString.toString(),line.toString());
lineReader.readLine(line);
Assert.assertEquals(Expected,line.toString());
// Second scenario: a multi-character delimiter with adjacent and
// back-to-back occurrences, plus a trailing partial-delimiter sequence.
Delimiter="record";
StringBuilder TestStringBuilder=new StringBuilder();
TestStringBuilder.append(Delimiter + "Kerala ");
TestStringBuilder.append(Delimiter + "Bangalore");
TestStringBuilder.append(Delimiter + " North Korea");
TestStringBuilder.append(Delimiter + Delimiter + "Guantanamo");
TestStringBuilder.append(Delimiter + "ecord" + "recor"+ "core");
TestData=TestStringBuilder.toString();
lineReader=new LineReader(new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
// Leading delimiter yields an initial empty record.
lineReader.readLine(line);
Assert.assertEquals("",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Kerala ",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Bangalore",line.toString());
lineReader.readLine(line);
Assert.assertEquals(" North Korea",line.toString());
// Back-to-back delimiters yield an empty record between them.
lineReader.readLine(line);
Assert.assertEquals("",line.toString());
lineReader.readLine(line);
Assert.assertEquals("Guantanamo",line.toString());
lineReader.readLine(line);
Assert.assertEquals(("ecord" + "recor" + "core"),line.toString());
}
InternalCallVerifier BooleanVerifier
/** CIDR entries must match every address inside the ranges and none outside. */
@Test public void testCIDRs(){
MachineList ml=new MachineList(CIDR_LIST);
// Boundary and interior addresses of both CIDR ranges must be included.
String[] inside={"10.222.0.0","10.222.0.1","10.222.0.255","10.222.255.0","10.222.255.254","10.222.255.255","10.241.23.0","10.241.23.1","10.241.23.254","10.241.23.255"};
for ( String addr : inside) {
assertTrue(ml.includes(addr));
}
// Immediate neighbours of the ranges, and an unrelated IP, must be excluded.
String[] outside={"10.221.255.255","10.223.0.0","10.119.103.111"};
for ( String addr : outside) {
assertFalse(ml.includes(addr));
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Matching via hostname resolution with a mocked InetAddressFactory:
 * "1.2.3.4" is accepted even though its reverse lookup yields
 * "differentName" — presumably because a HOST_LIST entry (host4)
 * forward-resolves to 1.2.3.4; "1.2.3.5" reverse-resolves to "host5",
 * which does not match. TODO(review): confirm HOST_LIST contents.
 */
@Test public void testHostNames() throws UnknownHostException {
InetAddress addressHost1=InetAddress.getByName("1.2.3.1");
InetAddress addressHost4=InetAddress.getByName("1.2.3.4");
InetAddress addressMockHost4=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("differentName");
InetAddress addressMockHost5=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5");
MachineList.InetAddressFactory addressFactory=Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4);
Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml=new MachineList(StringUtils.getTrimmedStringCollection(HOST_LIST),addressFactory);
assertTrue(ml.includes("1.2.3.4"));
assertFalse(ml.includes("1.2.3.5"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Hostname entries resolved once at construction time: the factory maps
 * host1/host4 to fixed IPs, so 1.2.3.4 is included and 1.2.3.5 is not.
 */
@Test public void testStaticIPHostNameList() throws UnknownHostException {
InetAddress addressHost1=InetAddress.getByName("1.2.3.1");
InetAddress addressHost4=InetAddress.getByName("1.2.3.4");
MachineList.InetAddressFactory addressFactory=Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml=new MachineList(StringUtils.getTrimmedStringCollection(HOST_LIST),addressFactory);
assertTrue(ml.includes("1.2.3.4"));
assertFalse(ml.includes("1.2.3.5"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Reverse-lookup matching: the mocked factory makes "1.2.3.4"
 * reverse-resolve to "host4" (accepted) and "1.2.3.5" to "host5"
 * (rejected).
 */
@Test public void testHostNamesReverserIpMatch() throws UnknownHostException {
InetAddress addressHost1=InetAddress.getByName("1.2.3.1");
InetAddress addressHost4=InetAddress.getByName("1.2.3.4");
InetAddress addressMockHost4=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost4.getCanonicalHostName()).thenReturn("host4");
InetAddress addressMockHost5=Mockito.mock(InetAddress.class);
Mockito.when(addressMockHost5.getCanonicalHostName()).thenReturn("host5");
MachineList.InetAddressFactory addressFactory=Mockito.mock(MachineList.InetAddressFactory.class);
Mockito.when(addressFactory.getByName("1.2.3.4")).thenReturn(addressMockHost4);
Mockito.when(addressFactory.getByName("1.2.3.5")).thenReturn(addressMockHost5);
Mockito.when(addressFactory.getByName("host1")).thenReturn(addressHost1);
Mockito.when(addressFactory.getByName("host4")).thenReturn(addressHost4);
MachineList ml=new MachineList(StringUtils.getTrimmedStringCollection(HOST_LIST),addressFactory);
assertTrue(ml.includes("1.2.3.4"));
assertFalse(ml.includes("1.2.3.5"));
}
InternalCallVerifier BooleanVerifier
/** A /16 CIDR entry must cover its whole range and nothing adjacent. */
@Test public void testCIDRWith16bitmask(){
MachineList ml=new MachineList(CIDR_LIST1);
for ( String addr : new String[]{"10.222.0.0","10.222.0.1","10.222.0.255","10.222.255.0","10.222.255.254","10.222.255.255"}) {
assertTrue(ml.includes(addr));
}
// Neighbours of the /16 range and an unrelated IP must be excluded.
for ( String addr : new String[]{"10.221.255.255","10.223.0.0","10.119.103.111"}) {
assertFalse(ml.includes(addr));
}
}
InternalCallVerifier BooleanVerifier
/** A /24 CIDR entry must cover exactly its 256 addresses. */
@Test public void testCIDRWith8BitMask(){
MachineList ml=new MachineList(CIDR_LIST2);
for ( String addr : new String[]{"10.241.23.0","10.241.23.1","10.241.23.254","10.241.23.255"}) {
assertTrue(ml.includes(addr));
}
// Addresses just outside the /24 and an unrelated IP must be excluded.
for ( String addr : new String[]{"10.241.22.255","10.241.24.0","10.119.103.111"}) {
assertFalse(ml.includes(addr));
}
}
InternalCallVerifier BooleanVerifier
/** Mixed list of bare IPs and CIDR ranges: each entry kind must match. */
@Test public void testIPandCIDRs(){
MachineList ml=new MachineList(IP_CIDR_LIST);
for ( String addr : new String[]{"10.119.103.112","10.222.0.0","10.222.255.255","10.241.23.0","10.241.23.255"}) {
assertTrue(ml.includes(addr));
}
for ( String addr : new String[]{"10.119.103.111","10.221.255.255","10.223.0.0","10.241.22.255","10.241.24.0"}) {
assertFalse(ml.includes(addr));
}
}
InternalCallVerifier BooleanVerifier
/** A plain IP list matches exactly the listed addresses. */
@Test public void testIPList(){
MachineList machines=new MachineList(IP_LIST);
assertTrue(machines.includes("10.119.103.112"));
assertFalse(machines.includes("10.119.103.111"));
}
InternalCallVerifier BooleanVerifier
/** List mixing hostnames, bare IPs and CIDRs: IP and CIDR entries must match. */
@Test public void testHostNameIPandCIDRs(){
MachineList ml=new MachineList(HOSTNAME_IP_CIDR_LIST);
for ( String addr : new String[]{"10.119.103.112","10.222.0.0","10.222.255.255","10.241.23.0","10.241.23.255"}) {
assertTrue(ml.includes(addr));
}
for ( String addr : new String[]{"10.119.103.111","10.221.255.255","10.223.0.0","10.241.22.255","10.241.24.0"}) {
assertFalse(ml.includes(addr));
}
}
InternalCallVerifier BooleanVerifier
/** The wildcard entry "*" matches every address. */
@Test public void testWildCard(){
MachineList anyHost=new MachineList("*");
assertTrue(anyHost.includes("10.119.103.112"));
assertTrue(anyHost.includes("1.2.3.4"));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * getCollection() must expose all 7 trimmed entries of the source list.
 */
@Test public void testGetCollection(){
MachineList ml=new MachineList(HOSTNAME_IP_CIDR_LIST);
Collection<String> col=ml.getCollection();
// Use the collection fetched above; the original re-queried
// ml.getCollection() and never used 'col' for the size check.
assertEquals(7,col.size());
for ( String item : StringUtils.getTrimmedStringCollection(HOSTNAME_IP_CIDR_LIST)) {
assertTrue(col.contains(item));
}
}
InternalCallVerifier BooleanVerifier
/** An IP list containing extra whitespace must still match correctly. */
@Test public void testIPListSpaces(){
MachineList machines=new MachineList(IP_LIST_SPACES);
assertTrue(machines.includes("10.119.103.112"));
assertFalse(machines.includes("10.119.103.111"));
}
InternalCallVerifier EqualityVerifier
/**
 * Progress.set must clamp its argument into [0, 1]: NaN, -Infinity and
 * negative values become 0; values above 1 and +Infinity become 1.
 */
@Test public void testSet(){
Progress progress=new Progress();
progress.set(Float.NaN);
Assert.assertEquals(0,progress.getProgress(),0.0);
progress.set(Float.NEGATIVE_INFINITY);
Assert.assertEquals(0,progress.getProgress(),0.0);
progress.set(-1);
Assert.assertEquals(0,progress.getProgress(),0.0);
progress.set((float)1.1);
Assert.assertEquals(1,progress.getProgress(),0.0);
progress.set(Float.POSITIVE_INFINITY);
Assert.assertEquals(1,progress.getProgress(),0.0);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Add/remove/ordering of shutdown hooks: getShutdownHooksInOrder() lists
 * higher-priority hooks first.
 */
@Test public void shutdownHookManager(){
ShutdownHookManager mgr=ShutdownHookManager.get();
Assert.assertNotNull(mgr);
Assert.assertEquals(0,mgr.getShutdownHooksInOrder().size());
Runnable hook1=new Runnable(){
@Override public void run(){
}
}
;
Runnable hook2=new Runnable(){
@Override public void run(){
}
}
;
mgr.addShutdownHook(hook1,0);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1,mgr.getShutdownHooksInOrder().size());
Assert.assertEquals(hook1,mgr.getShutdownHooksInOrder().get(0));
mgr.removeShutdownHook(hook1);
Assert.assertFalse(mgr.hasShutdownHook(hook1));
mgr.addShutdownHook(hook1,0);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1,mgr.getShutdownHooksInOrder().size());
// NOTE(review): the next two asserts duplicate the two above — possibly a
// second addShutdownHook(hook1, 0) (a duplicate-add check) was lost here.
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertEquals(1,mgr.getShutdownHooksInOrder().size());
mgr.addShutdownHook(hook2,1);
Assert.assertTrue(mgr.hasShutdownHook(hook1));
Assert.assertTrue(mgr.hasShutdownHook(hook2));
Assert.assertEquals(2,mgr.getShutdownHooksInOrder().size());
// Priority 1 (hook2) must come before priority 0 (hook1).
Assert.assertEquals(hook2,mgr.getShutdownHooksInOrder().get(0));
Assert.assertEquals(hook1,mgr.getShutdownHooksInOrder().get(1));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * ZKUtil.parseAuth must split comma/whitespace-separated "scheme:auth"
 * entries into ZKAuthInfo records, preserving colons inside the auth part.
 */
@Test public void testGoodAuths(){
// Generic element type restored: with a raw List, the get(0) assignment
// to ZKAuthInfo below would not compile without a cast.
List<ZKAuthInfo> result=ZKUtil.parseAuth("scheme:data,\n   scheme2:user:pass");
assertEquals(2,result.size());
ZKAuthInfo auth0=result.get(0);
assertEquals("scheme",auth0.getScheme());
assertEquals("data",new String(auth0.getAuth()));
ZKAuthInfo auth1=result.get(1);
assertEquals("scheme2",auth1.getScheme());
// The auth payload itself may contain ':'.
assertEquals("user:pass",new String(auth1.getAuth()));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * ZKUtil.parseACLs must translate "scheme:id:perms" entries into ACLs with
 * the correct permission bitmask and identity.
 */
@Test public void testGoodACLs(){
// Generic element type restored: with a raw List, the get(0) assignment
// to ACL below would not compile without a cast.
List<ACL> result=ZKUtil.parseACLs("sasl:hdfs/host1@MY.DOMAIN:cdrwa, sasl:hdfs/host2@MY.DOMAIN:ca");
ACL acl0=result.get(0);
// "cdrwa" = create, delete, read, write, admin.
assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ| Perms.WRITE| Perms.ADMIN,acl0.getPerms());
assertEquals("sasl",acl0.getId().getScheme());
assertEquals("hdfs/host1@MY.DOMAIN",acl0.getId().getId());
ACL acl1=result.get(1);
// "ca" = create, admin only.
assertEquals(Perms.CREATE | Perms.ADMIN,acl1.getPerms());
assertEquals("sasl",acl1.getId().getScheme());
assertEquals("hdfs/host2@MY.DOMAIN",acl1.getId().getId());
}
InternalCallVerifier BooleanVerifier
/**
 * CountingBloomFilter: approximateCount tracks repeated adds of the same
 * key, and delete decrements the count until membership disappears; then
 * runs the shared strategy suite against the filter.
 */
@Test public void testCountingBloomFilter(){
int hashId=Hash.JENKINS_HASH;
CountingBloomFilter filter=new CountingBloomFilter(bitSize,hashFunctionNumber,hashId);
Key key=new Key(new byte[]{48,48});
filter.add(key);
assertTrue("CountingBloomFilter.membership error ",filter.membershipTest(key));
assertTrue("CountingBloomFilter.approximateCount error",filter.approximateCount(key) == 1);
// Adding the same key again bumps the approximate count.
filter.add(key);
assertTrue("CountingBloomFilter.approximateCount error",filter.approximateCount(key) == 2);
// One delete leaves one occurrence, so membership still holds.
filter.delete(key);
assertTrue("CountingBloomFilter.membership error ",filter.membershipTest(key));
// Second delete removes the last occurrence.
filter.delete(key);
assertFalse("CountingBloomFilter.membership error ",filter.membershipTest(key));
assertTrue("CountingBloomFilter.approximateCount error",filter.approximateCount(key) == 0);
BloomFilterCommonTester.of(hashId,numInsertions).withFilterInstance(filter).withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,BloomFilterTestStrategy.ADD_KEYS_STRATEGY,BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,BloomFilterTestStrategy.WRITE_READ_STRATEGY,BloomFilterTestStrategy.FILTER_OR_STRATEGY,BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Hash facade wiring: parseHashType mapping, instance selection via the
 * "hadoop.util.hash.type" configuration key (murmur is the default), and
 * determinism of repeated hashing. The '==' comparisons rely on
 * getInstance returning singletons for each hash type.
 */
@Test public void testHash(){
int iterations=30;
assertTrue("testHash jenkins error !!!",Hash.JENKINS_HASH == Hash.parseHashType("jenkins"));
assertTrue("testHash murmur error !!!",Hash.MURMUR_HASH == Hash.parseHashType("murmur"));
assertTrue("testHash undefined",Hash.INVALID_HASH == Hash.parseHashType("undefined"));
Configuration cfg=new Configuration();
cfg.set("hadoop.util.hash.type","murmur");
assertTrue("testHash",MurmurHash.getInstance() == Hash.getInstance(cfg));
cfg=new Configuration();
cfg.set("hadoop.util.hash.type","jenkins");
assertTrue("testHash jenkins configuration error !!!",JenkinsHash.getInstance() == Hash.getInstance(cfg));
// No explicit type configured: murmur is the default.
cfg=new Configuration();
assertTrue("testHash undefine configuration error !!!",MurmurHash.getInstance() == Hash.getInstance(cfg));
assertTrue("testHash error jenkin getInstance !!!",JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH));
assertTrue("testHash error murmur getInstance !!!",MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH));
assertNull("testHash error invalid getInstance !!!",Hash.getInstance(Hash.INVALID_HASH));
// The same input must hash identically on every invocation.
int murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes());
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation murmur hash error !!!",murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes()));
}
murmurHash=Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(),67);
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation murmur hash error !!!",murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(),67));
}
int jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes());
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation jenkins hash error !!!",jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes()));
}
jenkinsHash=Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(),67);
for (int i=0; i < iterations; i++) {
assertTrue("multiple evaluation jenkins hash error !!!",jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(),67));
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * The protobuf record factory must instantiate the PB implementation
 * class for each requested record interface.
 */
@Test public void testPbRecordFactory(){
RecordFactory pbRecordFactory=RecordFactoryPBImpl.get();
try {
AllocateResponse response=pbRecordFactory.newRecordInstance(AllocateResponse.class);
Assert.assertEquals(AllocateResponsePBImpl.class,response.getClass());
}
catch ( YarnRuntimeException e) {
e.printStackTrace();
// Fixed typo in the failure message ("crete" -> "create").
Assert.fail("Failed to create record");
}
try {
// Renamed local from 'response': this record is a request, not a response.
AllocateRequest request=pbRecordFactory.newRecordInstance(AllocateRequest.class);
Assert.assertEquals(AllocateRequestPBImpl.class,request.getClass());
}
catch ( YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to create record");
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test the method registerNodeManager. Method should return a not null
 * result, and propagate server-side failures as YarnException.
 */
@Test public void testResourceTrackerPBClientImpl() throws Exception {
RegisterNodeManagerRequest request=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
assertNotNull(client.registerNodeManager(request));
// Make the test server throw; the client must surface it as YarnException.
ResourceTrackerTestImpl.exception=true;
try {
client.registerNodeManager(request);
fail("there should be YarnException");
}
catch ( YarnException e) {
assertTrue(e.getMessage().startsWith("testMessage"));
}
finally {
// Always restore the shared flag so later tests are unaffected.
ResourceTrackerTestImpl.exception=false;
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Test the method nodeHeartbeat. Method should return a not null result,
 * and propagate server-side failures as YarnException.
 */
@Test public void testNodeHeartbeat() throws Exception {
NodeHeartbeatRequest request=recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
assertNotNull(client.nodeHeartbeat(request));
// Make the test server throw; the client must surface it as YarnException.
ResourceTrackerTestImpl.exception=true;
try {
client.nodeHeartbeat(request);
fail("there should be YarnException");
}
catch ( YarnException e) {
assertTrue(e.getMessage().startsWith("testMessage"));
}
finally {
// Always restore the shared flag so later tests are unaffected.
ResourceTrackerTestImpl.exception=false;
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * The protobuf record factory must instantiate the PB implementation
 * class for NodeHeartbeatRequest.
 */
@Test public void testPbRecordFactory(){
RecordFactory pbRecordFactory=RecordFactoryPBImpl.get();
try {
NodeHeartbeatRequest request=pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
Assert.assertEquals(NodeHeartbeatRequestPBImpl.class,request.getClass());
}
catch ( YarnRuntimeException e) {
e.printStackTrace();
// Fixed typo in the failure message ("crete" -> "create").
Assert.fail("Failed to create record");
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test NodeStatusPBImpl: round-trips a fully populated status through its
 * protobuf form and spot-checks the restored fields.
 */
@Test public void testNodeStatusPBImpl(){
NodeStatusPBImpl original=new NodeStatusPBImpl();
original.setContainersStatuses(Arrays.asList(getContainerStatus(1,2,1),getContainerStatus(2,3,1)));
original.setKeepAliveApplications(Arrays.asList(getApplicationId(3),getApplicationId(4)));
original.setNodeHealthStatus(getNodeHealthStatus());
original.setNodeId(getNodeId());
original.setResponseId(1);
// Rebuild from the serialized proto and verify the data survived.
NodeStatusPBImpl copy=new NodeStatusPBImpl(original.getProto());
assertEquals(3,copy.getContainersStatuses().get(1).getContainerId().getId());
assertEquals(3,copy.getKeepAliveApplications().get(0).getId());
assertEquals(1000,copy.getNodeHealthStatus().getLastHealthReportTime());
assertEquals(9090,copy.getNodeId().getPort());
assertEquals(1,copy.getResponseId());
}
InternalCallVerifier EqualityVerifier
/**
 * Test NodeHeartbeatResponsePBImpl: round-trips a populated response
 * through its protobuf form and verifies each field is restored.
 */
@Test public void testNodeHeartbeatResponsePBImpl(){
NodeHeartbeatResponsePBImpl original=new NodeHeartbeatResponsePBImpl();
original.setDiagnosticsMessage("testDiagnosticMessage");
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNextHeartBeatInterval(1000);
original.setNodeAction(NodeAction.NORMAL);
original.setResponseId(100);
// Rebuild from the serialized proto and verify the data survived.
NodeHeartbeatResponsePBImpl copy=new NodeHeartbeatResponsePBImpl(original.getProto());
assertEquals(100,copy.getResponseId());
assertEquals(NodeAction.NORMAL,copy.getNodeAction());
assertEquals(1000,copy.getNextHeartBeatInterval());
assertEquals(1,copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1,copy.getNMTokenMasterKey().getKeyId());
assertEquals("testDiagnosticMessage",copy.getDiagnosticsMessage());
}
InternalCallVerifier EqualityVerifier
/**
 * Test NodeHeartbeatRequestPBImpl: round-trips a populated request
 * through its protobuf form and verifies each field is restored.
 */
@Test public void testNodeHeartbeatRequestPBImpl(){
NodeHeartbeatRequestPBImpl original=new NodeHeartbeatRequestPBImpl();
original.setLastKnownContainerTokenMasterKey(getMasterKey());
original.setLastKnownNMTokenMasterKey(getMasterKey());
original.setNodeStatus(getNodeStatus());
// Rebuild from the serialized proto and verify the data survived.
NodeHeartbeatRequestPBImpl copy=new NodeHeartbeatRequestPBImpl(original.getProto());
assertEquals(1,copy.getLastKnownContainerTokenMasterKey().getKeyId());
assertEquals(1,copy.getLastKnownNMTokenMasterKey().getKeyId());
assertEquals("localhost",copy.getNodeStatus().getNodeId().getHost());
}
InternalCallVerifier EqualityVerifier
/**
 * Test RegisterNodeManagerResponsePBImpl. Test getters and setters. The
 * RegisterNodeManagerResponsePBImpl should generate a prototype and data
 * restore from prototype
 */
@Test public void testRegisterNodeManagerResponsePBImpl(){
RegisterNodeManagerResponsePBImpl original=new RegisterNodeManagerResponsePBImpl();
original.setContainerTokenMasterKey(getMasterKey());
original.setNMTokenMasterKey(getMasterKey());
original.setNodeAction(NodeAction.NORMAL);
original.setDiagnosticsMessage("testDiagnosticMessage");
// Rebuild from the serialized proto and verify the data survived.
RegisterNodeManagerResponsePBImpl copy=new RegisterNodeManagerResponsePBImpl(original.getProto());
assertEquals(1,copy.getContainerTokenMasterKey().getKeyId());
assertEquals(1,copy.getNMTokenMasterKey().getKeyId());
assertEquals(NodeAction.NORMAL,copy.getNodeAction());
assertEquals("testDiagnosticMessage",copy.getDiagnosticsMessage());
}
InternalCallVerifier EqualityVerifier
/**
 * Test RegisterNodeManagerRequestPBImpl: round-trips a populated request
 * through its protobuf form and verifies each field is restored.
 */
@Test public void testRegisterNodeManagerRequestPBImpl(){
RegisterNodeManagerRequestPBImpl original=new RegisterNodeManagerRequestPBImpl();
original.setHttpPort(8080);
original.setNodeId(getNodeId());
Resource resource=recordFactory.newRecordInstance(Resource.class);
resource.setMemory(10000);
resource.setVirtualCores(2);
original.setResource(resource);
// Rebuild from the serialized proto and verify the data survived.
RegisterNodeManagerRequestPBImpl copy=new RegisterNodeManagerRequestPBImpl(original.getProto());
assertEquals(8080,copy.getHttpPort());
assertEquals(9090,copy.getNodeId().getPort());
assertEquals(10000,copy.getResource().getMemory());
assertEquals(2,copy.getResource().getVirtualCores());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test MasterKeyPBImpl: a proto round-trip must preserve the key id and
 * keep equals/hashCode consistent between original and copy.
 */
@Test public void testMasterKeyPBImpl(){
MasterKeyPBImpl original=new MasterKeyPBImpl();
original.setBytes(ByteBuffer.allocate(0));
original.setKeyId(1);
MasterKeyPBImpl copy=new MasterKeyPBImpl(original.getProto());
assertEquals(1,copy.getKeyId());
// equals and hashCode must agree for the round-tripped instance.
assertTrue(original.equals(copy));
assertEquals(original.hashCode(),copy.hashCode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
* Test SerializedExceptionPBImpl.
*/
@Test public void testSerializedExceptionPBImpl(){
// Round-trip serialized exceptions through proto form: first a bare message,
// then one carrying a nested cause chain.
SerializedExceptionPBImpl original=new SerializedExceptionPBImpl();
original.init("testMessage");
SerializedExceptionPBImpl copy=new SerializedExceptionPBImpl(original.getProto());
assertEquals("testMessage",copy.getMessage());
original=new SerializedExceptionPBImpl();
original.init("testMessage",new Throwable(new Throwable("parent")));
copy=new SerializedExceptionPBImpl(original.getProto());
assertEquals("testMessage",copy.getMessage());
// The innermost cause message must be reachable through the rebuilt chain.
assertEquals("parent",copy.getCause().getMessage());
assertTrue(copy.getRemoteTrace().startsWith("java.lang.Throwable: java.lang.Throwable: parent"));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
// NOTE(review): "Allcoate" is a typo for "Allocate"; the name is kept as-is
// to avoid changing the test's public identifier.
@Test public void testAllcoateRequestWithIncrease(){
List incRequests=new ArrayList();
for (int i=0; i < 3; i++) {
// The vcores value encodes the index so ordering can be checked after the round trip.
incRequests.add(ContainerResourceIncreaseRequest.newInstance(null,Resource.newInstance(0,i)));
}
AllocateRequest r=AllocateRequest.newInstance(123,0f,null,null,null,incRequests);
AllocateRequestProto p=((AllocateRequestPBImpl)r).getProto();
r=new AllocateRequestPBImpl(p);
Assert.assertEquals(123,r.getResponseId());
Assert.assertEquals(incRequests.size(),r.getIncreaseRequests().size());
for (int i=0; i < incRequests.size(); i++) {
Assert.assertEquals(r.getIncreaseRequests().get(i).getCapability().getVirtualCores(),incRequests.get(i).getCapability().getVirtualCores());
}
}
InternalCallVerifier EqualityVerifier
@Test public void testAllcoateRequestWithoutIncrease(){
  // Round-trip an AllocateRequest built with a null increase list.
  AllocateRequest request=AllocateRequest.newInstance(123,0f,null,null,null,null);
  AllocateRequestProto proto=((AllocateRequestPBImpl)request).getProto();
  AllocateRequest rebuilt=new AllocateRequestPBImpl(proto);
  Assert.assertEquals(123,rebuilt.getResponseId());
  // A null increase list must come back as an empty list, not null.
  Assert.assertEquals(0,rebuilt.getIncreaseRequests().size());
}
InternalCallVerifier EqualityVerifier
@Test public void testAllocateResponseWithoutIncDecContainers(){
  // Null increased/decreased container lists must deserialize as empty
  // lists rather than null.
  AllocateResponse response=AllocateResponse.newInstance(3,new ArrayList(),new ArrayList(),new ArrayList(),null,AMCommand.AM_RESYNC,3,null,new ArrayList(),null,null);
  AllocateResponseProto proto=((AllocateResponsePBImpl)response).getProto();
  AllocateResponse rebuilt=new AllocateResponsePBImpl(proto);
  Assert.assertEquals(0,rebuilt.getIncreasedContainers().size());
  Assert.assertEquals(0,rebuilt.getDecreasedContainers().size());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
@Test public void testAllocateResponseWithIncDecContainers(){
// Round-trip a response carrying 3 increased and 5 decreased containers and
// verify counts and per-element ordering survive serialization.
List incContainers=new ArrayList();
List decContainers=new ArrayList();
for (int i=0; i < 3; i++) {
incContainers.add(ContainerResourceIncrease.newInstance(null,Resource.newInstance(1024,i),null));
}
for (int i=0; i < 5; i++) {
decContainers.add(ContainerResourceDecrease.newInstance(null,Resource.newInstance(1024,i)));
}
AllocateResponse r=AllocateResponse.newInstance(3,new ArrayList(),new ArrayList(),new ArrayList(),null,AMCommand.AM_RESYNC,3,null,new ArrayList(),incContainers,decContainers);
AllocateResponseProto p=((AllocateResponsePBImpl)r).getProto();
r=new AllocateResponsePBImpl(p);
Assert.assertEquals(incContainers.size(),r.getIncreasedContainers().size());
Assert.assertEquals(decContainers.size(),r.getDecreasedContainers().size());
// The vcores value encodes the original index, so equality checks ordering.
for (int i=0; i < incContainers.size(); i++) {
Assert.assertEquals(i,r.getIncreasedContainers().get(i).getCapability().getVirtualCores());
}
for (int i=0; i < decContainers.size(); i++) {
Assert.assertEquals(i,r.getDecreasedContainers().get(i).getCapability().getVirtualCores());
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationAttemptId(){
// Attempt ids differing in any of (cluster timestamp, app id, attempt
// number) must be unequal; a5 duplicates a1 exactly.
ApplicationAttemptId a1=createAppAttemptId(10l,1,1);
ApplicationAttemptId a2=createAppAttemptId(10l,1,2);
ApplicationAttemptId a3=createAppAttemptId(10l,2,1);
ApplicationAttemptId a4=createAppAttemptId(8l,1,4);
ApplicationAttemptId a5=createAppAttemptId(10l,1,1);
Assert.assertTrue(a1.equals(a5));
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a3));
Assert.assertFalse(a1.equals(a4));
// Ordering: timestamp first, then app id, then attempt number.
Assert.assertTrue(a1.compareTo(a5) == 0);
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) < 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
// NOTE(review): hash inequality is not guaranteed by the hashCode contract;
// these asserts rely on the concrete hashes of these fixture values.
Assert.assertTrue(a1.hashCode() == a5.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a4.hashCode());
long ts=System.currentTimeMillis();
ApplicationAttemptId a6=createAppAttemptId(ts,543627,33492611);
// toString zero-pads the app id to 4 digits and the attempt number to 6.
Assert.assertEquals("appattempt_10_0001_000001",a1.toString());
Assert.assertEquals("appattempt_" + ts + "_543627_33492611",a6.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationId(){
// Application ids are compared by (cluster timestamp, id); a3 duplicates a1.
ApplicationId a1=ApplicationId.newInstance(10l,1);
ApplicationId a2=ApplicationId.newInstance(10l,2);
ApplicationId a3=ApplicationId.newInstance(10l,1);
ApplicationId a4=ApplicationId.newInstance(8l,3);
Assert.assertFalse(a1.equals(a2));
Assert.assertFalse(a1.equals(a4));
Assert.assertTrue(a1.equals(a3));
// Ordering: timestamp first, then id.
Assert.assertTrue(a1.compareTo(a2) < 0);
Assert.assertTrue(a1.compareTo(a3) == 0);
Assert.assertTrue(a1.compareTo(a4) > 0);
// NOTE(review): hash inequality is not guaranteed by the hashCode contract;
// these asserts rely on the concrete hashes of these fixture values.
Assert.assertTrue(a1.hashCode() == a3.hashCode());
Assert.assertFalse(a1.hashCode() == a2.hashCode());
Assert.assertFalse(a2.hashCode() == a4.hashCode());
long ts=System.currentTimeMillis();
ApplicationId a5=ApplicationId.newInstance(ts,45436343);
// toString zero-pads the id to 4 digits.
Assert.assertEquals("application_10_0001",a1.toString());
Assert.assertEquals("application_" + ts + "_45436343",a5.toString());
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationReport(){
  // Three identical reports start out equal; nulling key fields on one must
  // break value equality with the others.
  long timestamp=System.currentTimeMillis();
  ApplicationReport appReport1=createApplicationReport(1,1,timestamp);
  ApplicationReport appReport2=createApplicationReport(1,1,timestamp);
  ApplicationReport appReport3=createApplicationReport(1,1,timestamp);
  Assert.assertEquals(appReport1,appReport2);
  Assert.assertEquals(appReport2,appReport3);
  appReport1.setApplicationId(null);
  Assert.assertNull(appReport1.getApplicationId());
  // Fix: assertNotSame only compared references, which are always distinct
  // here, making the check vacuous; the intent is that nulling the id breaks
  // value equality.
  Assert.assertFalse(appReport1.equals(appReport2));
  appReport2.setCurrentApplicationAttemptId(null);
  Assert.assertNull(appReport2.getCurrentApplicationAttemptId());
  Assert.assertFalse(appReport2.equals(appReport3));
  // The AMRM token was never set by the factory helper.
  Assert.assertNull(appReport1.getAMRMToken());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testContainerId(){
// Container ids differing in any of (app id, attempt, timestamp, container
// number) must be unequal; c3 duplicates c1 exactly.
ContainerId c1=newContainerId(1,1,10l,1);
ContainerId c2=newContainerId(1,1,10l,2);
ContainerId c3=newContainerId(1,1,10l,1);
ContainerId c4=newContainerId(1,3,10l,1);
ContainerId c5=newContainerId(1,3,8l,1);
Assert.assertTrue(c1.equals(c3));
Assert.assertFalse(c1.equals(c2));
Assert.assertFalse(c1.equals(c4));
Assert.assertFalse(c1.equals(c5));
Assert.assertTrue(c1.compareTo(c3) == 0);
Assert.assertTrue(c1.compareTo(c2) < 0);
Assert.assertTrue(c1.compareTo(c4) < 0);
Assert.assertTrue(c1.compareTo(c5) > 0);
// NOTE(review): hash inequality is not guaranteed by the hashCode contract;
// these asserts rely on the concrete hashes of these fixture values.
Assert.assertTrue(c1.hashCode() == c3.hashCode());
Assert.assertFalse(c1.hashCode() == c2.hashCode());
Assert.assertFalse(c1.hashCode() == c4.hashCode());
Assert.assertFalse(c1.hashCode() == c5.hashCode());
long ts=System.currentTimeMillis();
ContainerId c6=newContainerId(36473,4365472,ts,25645811);
// toString zero-pads app id (4), attempt (2) and container number (6).
Assert.assertEquals("container_10_0001_01_000001",c1.toString());
Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",c6.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testResourceDecreaseContext(){
  // A ContainerResourceDecrease must keep its container id and capability
  // across a proto round trip.
  ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
  Resource capability=Resource.newInstance(1023,3);
  ContainerResourceDecrease decrease=ContainerResourceDecrease.newInstance(containerId,capability);
  ContainerResourceDecreaseProto proto=((ContainerResourceDecreasePBImpl)decrease).getProto();
  ContainerResourceDecrease rebuilt=new ContainerResourceDecreasePBImpl(proto);
  Assert.assertEquals(capability,rebuilt.getCapability());
  Assert.assertEquals(containerId,rebuilt.getContainerId());
}
InternalCallVerifier NullVerifier
@Test public void testResourceDecreaseContextWithNull(){
  // Null fields must survive serialization as nulls, not defaults.
  ContainerResourceDecrease decrease=ContainerResourceDecrease.newInstance(null,null);
  ContainerResourceDecreaseProto proto=((ContainerResourceDecreasePBImpl)decrease).getProto();
  ContainerResourceDecrease rebuilt=new ContainerResourceDecreasePBImpl(proto);
  Assert.assertNull(rebuilt.getCapability());
  Assert.assertNull(rebuilt.getContainerId());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testResourceIncreaseContext(){
// Round-trip an increase context carrying a container token and verify the
// token identifier bytes, capability and container id all survive.
byte[] identifier=new byte[]{1,2,3,4};
Token token=Token.newInstance(identifier,"","".getBytes(),"");
ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
Resource resource=Resource.newInstance(1023,3);
ContainerResourceIncrease ctx=ContainerResourceIncrease.newInstance(containerId,resource,token);
ContainerResourceIncreaseProto proto=((ContainerResourceIncreasePBImpl)ctx).getProto();
ctx=new ContainerResourceIncreasePBImpl(proto);
Assert.assertEquals(ctx.getCapability(),resource);
Assert.assertEquals(ctx.getContainerId(),containerId);
// Compare the raw identifier bytes, since Token equality is not relied on here.
Assert.assertTrue(Arrays.equals(ctx.getContainerToken().getIdentifier().array(),identifier));
}
InternalCallVerifier NullVerifier
@Test public void testResourceIncreaseContextWithNull(){
  // An all-null increase context must deserialize back to nulls.
  ContainerResourceIncrease increase=ContainerResourceIncrease.newInstance(null,null,null);
  ContainerResourceIncreaseProto proto=((ContainerResourceIncreasePBImpl)increase).getProto();
  ContainerResourceIncrease rebuilt=new ContainerResourceIncreasePBImpl(proto);
  Assert.assertNull(rebuilt.getContainerToken());
  Assert.assertNull(rebuilt.getCapability());
  Assert.assertNull(rebuilt.getContainerId());
}
InternalCallVerifier NullVerifier
@Test public void testResourceChangeContextWithNullField(){
  // An increase request with null id and capability round-trips as nulls.
  ContainerResourceIncreaseRequest request=ContainerResourceIncreaseRequest.newInstance(null,null);
  ContainerResourceIncreaseRequestProto proto=((ContainerResourceIncreaseRequestPBImpl)request).getProto();
  ContainerResourceIncreaseRequest recovered=new ContainerResourceIncreaseRequestPBImpl(proto);
  Assert.assertNull(recovered.getContainerId());
  Assert.assertNull(recovered.getCapability());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// NOTE(review): method name breaks the lowerCamelCase/test* naming
// convention; kept unchanged to preserve the test's public identifier.
@Test public void ContainerResourceIncreaseRequest(){
// Round-trip a fully populated increase request through its proto form.
ContainerId containerId=ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234,3),3),7);
Resource resource=Resource.newInstance(1023,3);
ContainerResourceIncreaseRequest context=ContainerResourceIncreaseRequest.newInstance(containerId,resource);
ContainerResourceIncreaseRequestProto proto=((ContainerResourceIncreaseRequestPBImpl)context).getProto();
ContainerResourceIncreaseRequest contextRecover=new ContainerResourceIncreaseRequestPBImpl(proto);
Assert.assertEquals(contextRecover.getContainerId(),containerId);
Assert.assertEquals(contextRecover.getCapability(),resource);
}
InternalCallVerifier EqualityVerifier
@Test public void testGetApplicationsRequest(){
// Populate every filter field of a GetApplicationsRequest, round-trip it
// through its proto form, and verify each filter value is preserved.
GetApplicationsRequest request=GetApplicationsRequest.newInstance();
EnumSet appStates=EnumSet.of(YarnApplicationState.ACCEPTED);
request.setApplicationStates(appStates);
Set tags=new HashSet();
tags.add("tag1");
request.setApplicationTags(tags);
Set types=new HashSet();
types.add("type1");
request.setApplicationTypes(types);
long startBegin=System.currentTimeMillis();
long startEnd=System.currentTimeMillis() + 1;
request.setStartRange(startBegin,startEnd);
long finishBegin=System.currentTimeMillis() + 2;
long finishEnd=System.currentTimeMillis() + 3;
request.setFinishRange(finishBegin,finishEnd);
long limit=100L;
request.setLimit(limit);
Set queues=new HashSet();
queues.add("queue1");
request.setQueues(queues);
Set users=new HashSet();
users.add("user1");
request.setUsers(users);
ApplicationsRequestScope scope=ApplicationsRequestScope.ALL;
request.setScope(scope);
GetApplicationsRequest requestFromProto=new GetApplicationsRequestPBImpl(((GetApplicationsRequestPBImpl)request).getProto());
// Whole-object equality first, then field-by-field for better diagnostics.
// NOTE(review): the message-form assertEquals calls below pass (message,
// actual, expected) -- swapped relative to the JUnit signature. This only
// affects failure messages, not pass/fail, so it is left as-is.
Assert.assertEquals(requestFromProto,request);
Assert.assertEquals("ApplicationStates from proto is not the same with original request",requestFromProto.getApplicationStates(),appStates);
Assert.assertEquals("ApplicationTags from proto is not the same with original request",requestFromProto.getApplicationTags(),tags);
Assert.assertEquals("ApplicationTypes from proto is not the same with original request",requestFromProto.getApplicationTypes(),types);
Assert.assertEquals("StartRange from proto is not the same with original request",requestFromProto.getStartRange(),new LongRange(startBegin,startEnd));
Assert.assertEquals("FinishRange from proto is not the same with original request",requestFromProto.getFinishRange(),new LongRange(finishBegin,finishEnd));
Assert.assertEquals("Limit from proto is not the same with original request",requestFromProto.getLimit(),limit);
Assert.assertEquals("Queues from proto is not the same with original request",requestFromProto.getQueues(),queues);
Assert.assertEquals("Users from proto is not the same with original request",requestFromProto.getUsers(),users);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testNodeId(){
// Node ids are compared by (host, port); nodeId3 duplicates nodeId1.
NodeId nodeId1=NodeId.newInstance("10.18.52.124",8041);
NodeId nodeId2=NodeId.newInstance("10.18.52.125",8038);
NodeId nodeId3=NodeId.newInstance("10.18.52.124",8041);
NodeId nodeId4=NodeId.newInstance("10.18.52.124",8039);
Assert.assertTrue(nodeId1.equals(nodeId3));
Assert.assertFalse(nodeId1.equals(nodeId2));
Assert.assertFalse(nodeId3.equals(nodeId4));
// Ordering: host first, then port.
Assert.assertTrue(nodeId1.compareTo(nodeId3) == 0);
Assert.assertTrue(nodeId1.compareTo(nodeId2) < 0);
Assert.assertTrue(nodeId3.compareTo(nodeId4) > 0);
// NOTE(review): hash inequality is not guaranteed by the hashCode contract;
// these asserts rely on the concrete hashes of these fixture values.
Assert.assertTrue(nodeId1.hashCode() == nodeId3.hashCode());
Assert.assertFalse(nodeId1.hashCode() == nodeId2.hashCode());
Assert.assertFalse(nodeId3.hashCode() == nodeId4.hashCode());
// toString renders as host:port.
Assert.assertEquals("10.18.52.124:8041",nodeId1.toString());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testDeserialize() throws Exception {
// deSerialize() before init() must fail; after init() it must reconstruct
// an equivalent exception.
Exception ex=new Exception("test exception");
SerializedExceptionPBImpl pb=new SerializedExceptionPBImpl();
try {
// Uninitialized proto carries no class name, so class lookup fails.
pb.deSerialize();
Assert.fail("deSerialze should throw YarnRuntimeException");
}
catch ( YarnRuntimeException e) {
Assert.assertEquals(ClassNotFoundException.class,e.getCause().getClass());
}
pb.init(ex);
// toString comparison covers both class name and message of the rebuilt exception.
Assert.assertEquals(ex.toString(),pb.deSerialize().toString());
}
InternalCallVerifier EqualityVerifier
@Test public void testSerializedException() throws Exception {
  // A serialized exception must survive a proto round trip with message,
  // remote trace and cause all intact.
  SerializedExceptionPBImpl original=new SerializedExceptionPBImpl();
  original.init(new Exception("test exception"));
  SerializedExceptionPBImpl restored=new SerializedExceptionPBImpl(original.getProto());
  Assert.assertEquals(original,restored);
  Assert.assertEquals(original.getMessage(),restored.getMessage());
  Assert.assertEquals(original.getRemoteTrace(),restored.getRemoteTrace());
  Assert.assertEquals(original.getCause(),restored.getCause());
}
InternalCallVerifier EqualityVerifier
@Test public void testEntities() throws Exception {
// Build two timeline entities, each with 2 events, 2 related entities,
// 2 primary filters and 2 other-info entries, then verify the counts and
// identifiers read back correctly.
TimelineEntities entities=new TimelineEntities();
for (int j=0; j < 2; ++j) {
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("entity id " + j);
entity.setEntityType("entity type " + j);
entity.setStartTime(System.currentTimeMillis());
for (int i=0; i < 2; ++i) {
TimelineEvent event=new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("event type " + i);
event.addEventInfo("key1","val1");
event.addEventInfo("key2","val2");
entity.addEvent(event);
}
entity.addRelatedEntity("test ref type 1","test ref id 1");
entity.addRelatedEntity("test ref type 2","test ref id 2");
entity.addPrimaryFilter("pkey1","pval1");
entity.addPrimaryFilter("pkey2","pval2");
entity.addOtherInfo("okey1","oval1");
entity.addOtherInfo("okey2","oval2");
entities.addEntity(entity);
}
// Dumped for debugging only; the JSON content itself is not asserted.
LOG.info("Entities in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(entities,true));
Assert.assertEquals(2,entities.getEntities().size());
// Insertion order is expected to be preserved.
TimelineEntity entity1=entities.getEntities().get(0);
Assert.assertEquals("entity id 0",entity1.getEntityId());
Assert.assertEquals("entity type 0",entity1.getEntityType());
Assert.assertEquals(2,entity1.getRelatedEntities().size());
Assert.assertEquals(2,entity1.getEvents().size());
Assert.assertEquals(2,entity1.getPrimaryFilters().size());
Assert.assertEquals(2,entity1.getOtherInfo().size());
TimelineEntity entity2=entities.getEntities().get(1);
Assert.assertEquals("entity id 1",entity2.getEntityId());
Assert.assertEquals("entity type 1",entity2.getEntityType());
Assert.assertEquals(2,entity2.getRelatedEntities().size());
Assert.assertEquals(2,entity2.getEvents().size());
Assert.assertEquals(2,entity2.getPrimaryFilters().size());
Assert.assertEquals(2,entity2.getOtherInfo().size());
}
InternalCallVerifier EqualityVerifier
@Test public void testEvents() throws Exception {
// Build events for two entities, each with 2 events carrying 2 info
// entries, then verify counts, ids and event types read back in order.
TimelineEvents events=new TimelineEvents();
for (int j=0; j < 2; ++j) {
TimelineEvents.EventsOfOneEntity partEvents=new TimelineEvents.EventsOfOneEntity();
partEvents.setEntityId("entity id " + j);
partEvents.setEntityType("entity type " + j);
for (int i=0; i < 2; ++i) {
TimelineEvent event=new TimelineEvent();
event.setTimestamp(System.currentTimeMillis());
event.setEventType("event type " + i);
event.addEventInfo("key1","val1");
event.addEventInfo("key2","val2");
partEvents.addEvent(event);
}
events.addEvent(partEvents);
}
// Dumped for debugging only; the JSON content itself is not asserted.
LOG.info("Events in JSON:");
LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(events,true));
Assert.assertEquals(2,events.getAllEvents().size());
// Insertion order is expected to be preserved.
TimelineEvents.EventsOfOneEntity partEvents1=events.getAllEvents().get(0);
Assert.assertEquals("entity id 0",partEvents1.getEntityId());
Assert.assertEquals("entity type 0",partEvents1.getEntityType());
Assert.assertEquals(2,partEvents1.getEvents().size());
TimelineEvent event11=partEvents1.getEvents().get(0);
Assert.assertEquals("event type 0",event11.getEventType());
Assert.assertEquals(2,event11.getEventInfo().size());
TimelineEvent event12=partEvents1.getEvents().get(1);
Assert.assertEquals("event type 1",event12.getEventType());
Assert.assertEquals(2,event12.getEventInfo().size());
TimelineEvents.EventsOfOneEntity partEvents2=events.getAllEvents().get(1);
Assert.assertEquals("entity id 1",partEvents2.getEntityId());
Assert.assertEquals("entity type 1",partEvents2.getEntityType());
Assert.assertEquals(2,partEvents2.getEvents().size());
TimelineEvent event21=partEvents2.getEvents().get(0);
Assert.assertEquals("event type 0",event21.getEventType());
Assert.assertEquals(2,event21.getEventInfo().size());
TimelineEvent event22=partEvents2.getEvents().get(1);
Assert.assertEquals("event type 1",event22.getEventType());
Assert.assertEquals(2,event22.getEventInfo().size());
}
InternalCallVerifier EqualityVerifier
@Test public void testTimelinePutErrors() throws Exception {
  // Build a response with three errors: error1 added directly and again via
  // the bulk addErrors() list, plus error2 via the list only.
  TimelinePutResponse TimelinePutErrors=new TimelinePutResponse();
  TimelinePutError error1=new TimelinePutError();
  error1.setEntityId("entity id 1");
  // BUG FIX: the second call previously overwrote the entity id with
  // setEntityId("entity type 1"); the value and the getEntityType asserts
  // below show the intent was to set the entity type.
  error1.setEntityType("entity type 1");
  error1.setErrorCode(TimelinePutError.NO_START_TIME);
  TimelinePutErrors.addError(error1);
  List response=new ArrayList();
  response.add(error1);
  TimelinePutError error2=new TimelinePutError();
  error2.setEntityId("entity id 2");
  // BUG FIX: same id/type mix-up as error1.
  error2.setEntityType("entity type 2");
  error2.setErrorCode(TimelinePutError.IO_EXCEPTION);
  response.add(error2);
  TimelinePutErrors.addErrors(response);
  // Dumped for debugging only; the JSON content itself is not asserted.
  LOG.info("Errors in JSON:");
  LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(TimelinePutErrors,true));
  // error1 appears twice (direct add + list add), error2 once.
  Assert.assertEquals(3,TimelinePutErrors.getErrors().size());
  TimelinePutError e=TimelinePutErrors.getErrors().get(0);
  Assert.assertEquals(error1.getEntityId(),e.getEntityId());
  Assert.assertEquals(error1.getEntityType(),e.getEntityType());
  Assert.assertEquals(error1.getErrorCode(),e.getErrorCode());
  e=TimelinePutErrors.getErrors().get(1);
  Assert.assertEquals(error1.getEntityId(),e.getEntityId());
  Assert.assertEquals(error1.getEntityType(),e.getEntityType());
  Assert.assertEquals(error1.getErrorCode(),e.getErrorCode());
  e=TimelinePutErrors.getErrors().get(2);
  Assert.assertEquals(error2.getEntityId(),e.getEntityId());
  Assert.assertEquals(error2.getEntityType(),e.getEntityType());
  Assert.assertEquals(error2.getErrorCode(),e.getErrorCode());
}
InternalCallVerifier BooleanVerifier
@Test(timeout=90000) public void testDebugFlag() throws Exception {
// Smoke test: passing --debug must not break client init or a full run.
String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1","--debug"};
LOG.info("Initializing DS Client");
Client client=new Client(new Configuration(yarnCluster.getConfig()));
Assert.assertTrue(client.init(args));
LOG.info("Running DS Client");
Assert.assertTrue(client.run());
}
InternalCallVerifier BooleanVerifier
@Test(timeout=90000) public void testContainerLaunchFailureHandling() throws Exception {
// ContainerLaunchFailAppMaster forces container launches to fail, so the
// overall client run must report failure.
String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--container_memory","128"};
LOG.info("Initializing DS Client");
Client client=new Client(ContainerLaunchFailAppMaster.class.getName(),new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
boolean result=client.run();
LOG.info("Client run completed. Result=" + result);
Assert.assertFalse(result);
}
InternalCallVerifier BooleanVerifier
@Test(timeout=90000) public void testDSRestartWithPreviousRunningContainers() throws Exception {
  // TestDSFailedAppMaster fails the first AM attempt, but because containers
  // are kept across attempts the overall run should still succeed.
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","1","--shell_command","sleep 8","--master_memory","512","--container_memory","128","--keep_containers_across_application_attempts"};
  LOG.info("Initializing DS Client");
  Client client=new Client(TestDSFailedAppMaster.class.getName(),new Configuration(yarnCluster.getConfig()));
  // Consistency fix: assert init success like the sibling DS tests instead
  // of silently ignoring the return value.
  boolean initSuccess=client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  Assert.assertTrue(result);
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=90000) public void testDSShellWithCustomLogPropertyFile() throws Exception {
  // Run the shell with a custom log4j file that raises the root level to
  // debug, then verify DEBUG output reached the container logs and that the
  // client/AM loggers picked up the new level.
  final File basedir=new File("target",TestDistributedShell.class.getName());
  final File tmpDir=new File(basedir,"tmpDir");
  tmpDir.mkdirs();
  final File customLogProperty=new File(tmpDir,"custom_log4j.properties");
  if (customLogProperty.exists()) {
    customLogProperty.delete();
  }
  if (!customLogProperty.createNewFile()) {
    Assert.fail("Can not create custom log4j property file.");
  }
  // Fix: try-with-resources closes the writer even if write() fails (the
  // previous plain close() leaked the handle on an exception path).
  try (PrintWriter fileWriter=new PrintWriter(customLogProperty)) {
    fileWriter.write("log4j.rootLogger=debug,stdout");
  }
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","3","--shell_command","echo","--shell_args","HADOOP","--log_properties",customLogProperty.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
  // Before the run the default log level is info, not debug.
  final Log LOG_Client=LogFactory.getLog(Client.class);
  Assert.assertTrue(LOG_Client.isInfoEnabled());
  Assert.assertFalse(LOG_Client.isDebugEnabled());
  final Log LOG_AM=LogFactory.getLog(ApplicationMaster.class);
  Assert.assertTrue(LOG_AM.isInfoEnabled());
  Assert.assertFalse(LOG_AM.isDebugEnabled());
  LOG.info("Initializing DS Client");
  final Client client=new Client(new Configuration(yarnCluster.getConfig()));
  boolean initSuccess=client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  // The DEBUG count in the container logs is the effective run assertion.
  Assert.assertTrue(verifyContainerLog(3,null,true,"DEBUG") > 10);
  Assert.assertTrue(LOG_Client.isInfoEnabled());
  Assert.assertTrue(LOG_Client.isDebugEnabled());
  Assert.assertTrue(LOG_AM.isInfoEnabled());
  Assert.assertTrue(LOG_AM.isDebugEnabled());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=90000) public void testDSShell() throws Exception {
// End-to-end shell run: launch the client on a background thread, poll the
// RM until the app report looks sane, then check the run result and the
// timeline entities the AM published.
String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
LOG.info("Initializing DS Client");
final Client client=new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
// run() blocks, so it executes on its own thread while this thread polls.
final AtomicBoolean result=new AtomicBoolean(false);
Thread t=new Thread(){
public void run(){
try {
result.set(client.run());
}
catch ( Exception e) {
throw new RuntimeException(e);
}
}
}
;
t.start();
YarnClient yarnClient=YarnClient.createYarnClient();
yarnClient.init(new Configuration(yarnCluster.getConfig()));
yarnClient.start();
String hostName=NetUtils.getHostname();
boolean verified=false;
String errorMessage="";
// Poll until the report carries a real host, then verify host/rpc port;
// bail out once the app finishes (the timeout annotation bounds the loop).
while (!verified) {
List apps=yarnClient.getApplications();
if (apps.size() == 0) {
Thread.sleep(10);
continue;
}
ApplicationReport appReport=apps.get(0);
// "N/A" means the AM has not registered its host yet.
if (appReport.getHost().equals("N/A")) {
Thread.sleep(10);
continue;
}
errorMessage="Expected host name to start with '" + hostName + "', was '"+ appReport.getHost()+ "'. Expected rpc port to be '-1', was '"+ appReport.getRpcPort()+ "'.";
if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
verified=true;
}
if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
break;
}
}
Assert.assertTrue(errorMessage,verified);
t.join();
LOG.info("Client run completed. Result=" + result);
Assert.assertTrue(result.get());
// One AM attempt entity with two events, and one entity per container.
TimelineEntities entitiesAttempts=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),null,null,null,null,null,null,null,null);
Assert.assertNotNull(entitiesAttempts);
Assert.assertEquals(1,entitiesAttempts.getEntities().size());
Assert.assertEquals(2,entitiesAttempts.getEntities().get(0).getEvents().size());
Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
TimelineEntities entities=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_CONTAINER.toString(),null,null,null,null,null,null,null,null);
Assert.assertNotNull(entities);
Assert.assertEquals(2,entities.getEntities().size());
Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_CONTAINER.toString());
}
InternalCallVerifier BooleanVerifier
@Test(timeout=90000) public void testDSShellWithMultipleArgs() throws Exception {
// Four containers each echo the multi-word shell_args; the container logs
// must contain the full argument string.
String[] args={"--jar",APPMASTER_JAR,"--num_containers","4","--shell_command","echo","--shell_args","HADOOP YARN MAPREDUCE HDFS","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
LOG.info("Initializing DS Client");
final Client client=new Client(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=client.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running DS Client");
// NOTE(review): run()'s result is logged but not asserted here;
// verifyContainerLog below is the effective check.
boolean result=client.run();
LOG.info("Client run completed. Result=" + result);
List expectedContent=new ArrayList();
expectedContent.add("HADOOP YARN MAPREDUCE HDFS");
verifyContainerLog(4,expectedContent,false,"");
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=90000) public void testDSShellWithShellScript() throws Exception {
  // Ship a custom shell script to one container and verify its echoed
  // output shows up in the container log.
  final File basedir=new File("target",TestDistributedShell.class.getName());
  final File tmpDir=new File(basedir,"tmpDir");
  tmpDir.mkdirs();
  final File customShellScript=new File(tmpDir,"custom_script.sh");
  if (customShellScript.exists()) {
    customShellScript.delete();
  }
  if (!customShellScript.createNewFile()) {
    Assert.fail("Can not create custom shell script file.");
  }
  // Fix: try-with-resources closes the writer even if write() fails (the
  // previous plain close() leaked the handle on an exception path).
  try (PrintWriter fileWriter=new PrintWriter(customShellScript)) {
    fileWriter.write("echo testDSShellWithShellScript");
  }
  System.out.println(customShellScript.getAbsolutePath());
  String[] args={"--jar",APPMASTER_JAR,"--num_containers","1","--shell_script",customShellScript.getAbsolutePath(),"--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"};
  LOG.info("Initializing DS Client");
  final Client client=new Client(new Configuration(yarnCluster.getConfig()));
  boolean initSuccess=client.init(args);
  Assert.assertTrue(initSuccess);
  LOG.info("Running DS Client");
  boolean result=client.run();
  LOG.info("Client run completed. Result=" + result);
  // The container log content is the effective assertion for the run.
  List expectedContent=new ArrayList();
  expectedContent.add("testDSShellWithShellScript");
  verifyContainerLog(1,expectedContent,false,"");
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test(timeout=30000) public void testUMALauncher() throws Exception {
// Launch an unmanaged AM that exits with success; the override checks the
// attempt is reported LAUNCHED right before the AM process starts.
String classpath=getTestRuntimeClasspath();
String javaHome=System.getenv("JAVA_HOME");
if (javaHome == null) {
// Cannot spawn the AM JVM without JAVA_HOME; skip rather than fail.
LOG.fatal("JAVA_HOME not defined. Test not running.");
return;
}
String[] args={"--classpath",classpath,"--queue","default","--cmd",javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName()+ " success"};
LOG.info("Initializing Launcher");
UnmanagedAMLauncher launcher=new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig())){
public void launchAM( ApplicationAttemptId attemptId) throws IOException, YarnException {
// For an unmanaged AM the RM marks the attempt LAUNCHED before the AM
// process actually runs; verify that here, then proceed.
YarnApplicationAttemptState attemptState=rmClient.getApplicationAttemptReport(attemptId).getYarnApplicationAttemptState();
Assert.assertTrue(attemptState.equals(YarnApplicationAttemptState.LAUNCHED));
super.launchAM(attemptId);
}
}
;
boolean initSuccess=launcher.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running Launcher");
boolean result=launcher.run();
LOG.info("Launcher run completed. Result=" + result);
Assert.assertTrue(result);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testUMALauncherError() throws Exception {
// Launch an unmanaged AM that exits with failure; run() is expected to
// surface that as a RuntimeException.
String classpath=getTestRuntimeClasspath();
String javaHome=System.getenv("JAVA_HOME");
if (javaHome == null) {
// Cannot spawn the AM JVM without JAVA_HOME; skip rather than fail.
LOG.fatal("JAVA_HOME not defined. Test not running.");
return;
}
String[] args={"--classpath",classpath,"--queue","default","--cmd",javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName()+ " failure"};
LOG.info("Initializing Launcher");
UnmanagedAMLauncher launcher=new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig()));
boolean initSuccess=launcher.init(args);
Assert.assertTrue(initSuccess);
LOG.info("Running Launcher");
try {
launcher.run();
fail("Expected an exception to occur as launch should have failed");
}
catch ( RuntimeException e) {
// Expected: the failing AM makes run() throw; nothing further to check.
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetClusterNodesOnHA() throws Exception {
  // The HA-aware client should fail over and return the fake node reports.
  List runningNodes=client.getNodeReports(NodeState.RUNNING);
  Assert.assertNotNull(runningNodes);
  Assert.assertFalse(runningNodes.isEmpty());
  Assert.assertEquals(cluster.createFakeNodeReports(),runningNodes);
}
InternalCallVerifier EqualityVerifier
@Test(timeout=15000) public void testGetDelegationTokenOnHA() throws Exception {
  // The delegation token fetched through the HA client must match the fixture.
  Token fetched=client.getRMDelegationToken(new Text(" "));
  Assert.assertEquals(cluster.createFakeToken(),fetched);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetApplicationAttemptsOnHA() throws Exception {
  // Attempt reports retrieved over HA must match the fake fixtures.
  List attempts=client.getApplicationAttempts(cluster.createFakeAppId());
  Assert.assertNotNull(attempts);
  Assert.assertFalse(attempts.isEmpty());
  Assert.assertEquals(cluster.createFakeApplicationAttemptReports(),attempts);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=15000) public void testGetContainersOnHA() throws Exception {
  // Container reports retrieved over HA must match the fake fixtures.
  List containers=client.getContainers(cluster.createFakeApplicationAttemptId());
  Assert.assertNotNull(containers);
  Assert.assertFalse(containers.isEmpty());
  Assert.assertEquals(cluster.createFakeContainerReports(),containers);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A freshly created application must carry the fake id handed out by the cluster. */
@Test(timeout=15000) public void testGetNewApplicationOnHA() throws Exception {
  // createApplication() round-trips through the active RM.
  ApplicationId newAppId=client.createApplication().getApplicationSubmissionContext().getApplicationId();
  Assert.assertNotNull(newAppId);
  Assert.assertEquals(cluster.createFakeAppId(),newAppId);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Queue info for "root" must match the cluster's fake queue info under HA. */
@Test(timeout=15000) public void testGetQueueInfoOnHA() throws Exception {
  QueueInfo rootQueue=client.getQueueInfo("root");
  Assert.assertNotNull(rootQueue);
  Assert.assertEquals(cluster.createFakeQueueInfo(),rootQueue);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A single attempt report fetched under HA must equal the cluster's fake report. */
@Test(timeout=15000) public void testGetApplicationAttemptReportOnHA() throws Exception {
  ApplicationAttemptReport attemptReport=client.getApplicationAttemptReport(cluster.createFakeApplicationAttemptId());
  Assert.assertNotNull(attemptReport);
  Assert.assertEquals(cluster.createFakeApplicationAttemptReport(),attemptReport);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** An application report fetched under HA must equal the cluster's fake report. */
@Test(timeout=15000) public void testGetApplicationReportOnHA() throws Exception {
  ApplicationReport appReport=client.getApplicationReport(cluster.createFakeAppId());
  Assert.assertNotNull(appReport);
  Assert.assertEquals(cluster.createFakeAppReport(),appReport);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** A container report fetched under HA must equal the cluster's fake report. */
@Test(timeout=15000) public void testGetContainerReportOnHA() throws Exception {
  ContainerReport containerReport=client.getContainerReport(cluster.createFakeContainerId());
  Assert.assertNotNull(containerReport);
  Assert.assertEquals(cluster.createFakeContainerReport(),containerReport);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Queue ACL info fetched under HA must equal the cluster's fake ACL list. */
@Test(timeout=15000) public void testGetQueueUserAclsOnHA() throws Exception {
  List aclInfoList=client.getQueueAclsInfo();
  Assert.assertNotNull(aclInfoList);
  Assert.assertFalse(aclInfoList.isEmpty());
  Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(),aclInfoList);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** The full application listing under HA must equal the cluster's fake reports. */
@Test(timeout=15000) public void testGetApplicationsOnHA() throws Exception {
  List appReports=client.getApplications();
  Assert.assertNotNull(appReports);
  Assert.assertFalse(appReports.isEmpty());
  Assert.assertEquals(cluster.createFakeAppReports(),appReports);
}
InternalCallVerifier BooleanVerifier
/** Submitting a minimal app under HA must land it in the active RM's app table. */
@Test(timeout=15000) public void testSubmitApplicationOnHA() throws Exception {
  // Assemble the three records first, then wire them into the context.
  ApplicationSubmissionContext submissionContext=Records.newRecord(ApplicationSubmissionContext.class);
  ContainerLaunchContext launchContext=Records.newRecord(ContainerLaunchContext.class);
  Resource resource=Records.newRecord(Resource.class);
  resource.setMemory(10);
  resource.setVirtualCores(1);
  submissionContext.setApplicationId(cluster.createFakeAppId());
  submissionContext.setAMContainerSpec(launchContext);
  submissionContext.setResource(resource);
  ApplicationId submittedId=client.submitApplication(submissionContext);
  // The active RM must now know about the submitted application.
  Assert.assertTrue(getActiveRM().getRMContext().getRMApps().containsKey(submittedId));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/** Cluster metrics fetched under HA must equal the cluster's fake metrics. */
@Test(timeout=15000) public void testGetClusterMetricsOnHA() throws Exception {
  YarnClusterMetrics metrics=client.getYarnClusterMetrics();
  Assert.assertNotNull(metrics);
  Assert.assertEquals(cluster.createFakeYarnClusterMetrics(),metrics);
}
InternalCallVerifier EqualityVerifier
/** An AM allocate call under HA must return the cluster's fake allocate response. */
@Test(timeout=15000) public void testAllocateOnHA() throws YarnException, IOException {
// Empty ask/release/blacklist lists: we only care about the response identity.
AllocateRequest request=AllocateRequest.newInstance(0,50f,new ArrayList(),new ArrayList(),ResourceBlacklistRequest.newInstance(new ArrayList(),new ArrayList()));
AllocateResponse response=amClient.allocate(request);
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals(this.cluster.createFakeAllocateResponse(),response);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * RM delegation-token service naming: a non-HA config yields exactly one
 * service entry, and an HA config with two RM ids yields one entry per RM.
 * Every entry must be based on the default RM address.
 */
@Test public void testGetRMDelegationTokenService(){
String defaultRMAddress=YarnConfiguration.DEFAULT_RM_ADDRESS;
YarnConfiguration conf=new YarnConfiguration();
// Non-HA: the token service is a single address.
Text tokenService=ClientRMProxy.getRMDelegationTokenService(conf);
String[] services=tokenService.toString().split(",");
assertEquals(1,services.length);
for ( String service : services) {
assertTrue("Incorrect token service name",service.contains(defaultRMAddress));
}
// Enable HA with two RM ids; both resolve to 0.0.0.0 so each service entry
// still contains the default RM address (only the port differs per RM).
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,"rm1"),"0.0.0.0");
conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,"rm2"),"0.0.0.0");
tokenService=ClientRMProxy.getRMDelegationTokenService(conf);
services=tokenService.toString().split(",");
// HA: one comma-separated service entry per configured RM.
assertEquals(2,services.length);
for ( String service : services) {
assertTrue("Incorrect token service name",service.contains(defaultRMAddress));
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A standalone WebAppProxyServer must keep serving proxy requests across an
 * explicit RM failover.
 */
@Test public void testWebAppProxyInStandAloneMode() throws YarnException, InterruptedException, IOException {
// Manual failover only; automatic failover would race with explicitFailover().
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
WebAppProxyServer webAppProxyServer=new WebAppProxyServer();
try {
conf.set(YarnConfiguration.PROXY_ADDRESS,"0.0.0.0:9099");
cluster.init(conf);
cluster.start();
// Bring one RM active before touching the proxy.
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
verifyConnections();
// Proxy lifecycle: INITED after init, STARTED after start.
webAppProxyServer.init(conf);
Assert.assertEquals(STATE.INITED,webAppProxyServer.getServiceState());
webAppProxyServer.start();
Assert.assertEquals(STATE.STARTED,webAppProxyServer.getServiceState());
URL wrongUrl=new URL("http://0.0.0.0:9099/proxy/" + fakeAppId);
HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
verifyResponse(proxyConn);
// Fail over to the other RM; the proxy must still answer.
explicitFailover();
verifyConnections();
// NOTE(review): connect() on an already-connected HttpURLConnection is a
// no-op, so this may re-verify the cached pre-failover response rather
// than issue a fresh request — confirm intent.
proxyConn.connect();
verifyResponse(proxyConn);
}
finally {
webAppProxyServer.stop();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Client connections must survive two consecutive explicit (admin-driven)
 * RM failovers.
 */
@Test public void testExplicitFailover() throws YarnException, InterruptedException, IOException {
// Manual failover only; automatic failover would fight the explicit transitions.
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
verifyConnections();
// First failover and reconnect check.
explicitFailover();
verifyConnections();
// Second failover returns to the original RM; connections must still work.
explicitFailover();
verifyConnections();
}
InternalCallVerifier BooleanVerifier
/**
 * ZK-based automatic failover: connections must survive two failovers, and a
 * fatal STATE_STORE_FENCED event must push the active RM back to STANDBY.
 */
@SuppressWarnings("unchecked") @Test public void testAutomaticFailover() throws YarnException, InterruptedException, IOException {
conf.set(YarnConfiguration.RM_CLUSTER_ID,"yarn-test-cluster");
conf.set(YarnConfiguration.RM_ZK_ADDRESS,hostPort);
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,2000);
cluster.init(conf);
cluster.start();
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
verifyConnections();
failover();
verifyConnections();
failover();
verifyConnections();
// Simulate the state store fencing the active RM; it must step down.
ResourceManager rm=cluster.getResourceManager(cluster.getActiveRMIndex());
RMFatalEvent event=new RMFatalEvent(RMFatalEventType.STATE_STORE_FENCED,"Fake RMFatalEvent");
rm.getRMContext().getDispatcher().getEventHandler().handle(event);
// Poll up to ~2s for the transition.
int maxWaitingAttempts=2000;
while (maxWaitingAttempts-- > 0) {
if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) {
break;
}
Thread.sleep(1);
}
// Fixed: the old check (maxWaitingAttempts == 0) could never trip, because
// the post-decrement leaves the counter at -1 when the loop times out, so
// the test silently passed even if the RM never reached STANDBY. Assert
// the actual state instead.
Assert.assertEquals("RM didn't transition to Standby ",HAServiceState.STANDBY,rm.getRMContext().getHAServiceState());
verifyConnections();
}
InternalCallVerifier BooleanVerifier
/**
 * The web-app proxy embedded in the RM (port 18088) must keep serving proxy
 * requests across an explicit failover.
 */
@Test public void testEmbeddedWebAppProxy() throws YarnException, InterruptedException, IOException {
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
cluster.init(conf);
cluster.start();
getAdminService(0).transitionToActive(req);
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
verifyConnections();
URL wrongUrl=new URL("http://0.0.0.0:18088/proxy/" + fakeAppId);
HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
verifyResponse(proxyConn);
explicitFailover();
verifyConnections();
// NOTE(review): connect() is a no-op on an already-connected connection,
// so this likely re-checks the cached pre-failover response — confirm intent.
proxyConn.connect();
verifyResponse(proxyConn);
}
InternalCallVerifier NullVerifier
/**
* Test method refreshAdminAcls. This method is present and it works.
*/
@Test public void testRefreshAdminAcls() throws Exception {
RefreshAdminAclsRequest request=recordFactory.newRecordInstance(RefreshAdminAclsRequest.class);
RefreshAdminAclsResponse response=client.refreshAdminAcls(request);
assertNotNull(response);
}
InternalCallVerifier NullVerifier
/**
 * refreshSuperUserGroupsConfiguration round-trips through the admin protocol
 * and must yield a non-null response.
 */
@Test public void testRefreshSuperUserGroupsConfiguration() throws Exception {
  RefreshSuperUserGroupsConfigurationResponse response=client.refreshSuperUserGroupsConfiguration(recordFactory.newRecordInstance(RefreshSuperUserGroupsConfigurationRequest.class));
  assertNotNull(response);
}
InternalCallVerifier NullVerifier
/** updateNodeResource must accept an empty request and return a non-null response. */
@Test public void testUpdateNodeResource() throws Exception {
  UpdateNodeResourceResponse response=client.updateNodeResource(recordFactory.newRecordInstance(UpdateNodeResourceRequest.class));
  assertNotNull(response);
}
InternalCallVerifier NullVerifier
/**
 * refreshUserToGroupsMappings round-trips through the admin protocol and
 * must yield a non-null response.
 */
@Test public void testRefreshUserToGroupsMappings() throws Exception {
  RefreshUserToGroupsMappingsResponse response=client.refreshUserToGroupsMappings(recordFactory.newRecordInstance(RefreshUserToGroupsMappingsRequest.class));
  assertNotNull(response);
}
InternalCallVerifier NullVerifier
/**
 * refreshQueues round-trips through the admin protocol and must yield a
 * non-null response.
 */
@Test public void testRefreshQueues() throws Exception {
  RefreshQueuesResponse response=client.refreshQueues(recordFactory.newRecordInstance(RefreshQueuesRequest.class));
  assertNotNull(response);
}
InternalCallVerifier NullVerifier
/** refreshServiceAcls must accept an empty request and return a non-null response. */
@Test public void testRefreshServiceAcls() throws Exception {
  RefreshServiceAclsResponse response=client.refreshServiceAcls(recordFactory.newRecordInstance(RefreshServiceAclsRequest.class));
  assertNotNull(response);
}
InternalCallVerifier NullVerifier
/**
 * refreshNodes round-trips through the admin protocol and must yield a
 * non-null response.
 */
@Test public void testRefreshNodes() throws Exception {
// NOTE(review): return value discarded — presumably forces ClientRMService
// initialization before the refresh call; confirm whether this is needed.
resourceManager.getClientRMService();
RefreshNodesRequest request=recordFactory.newRecordInstance(RefreshNodesRequest.class);
RefreshNodesResponse response=client.refreshNodes(request);
assertNotNull(response);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * CancelDelegationTokenRequestPBImpl: converting to its protobuf form and
 * back must preserve the delegation token.
 */
@Test public void testCancelDelegationTokenRequestPBImpl(){
  Token delegationToken=getDelegationToken();
  // Populate the record, serialize to proto, then rebuild from the proto.
  CancelDelegationTokenRequestPBImpl source=new CancelDelegationTokenRequestPBImpl();
  source.setDelegationToken(delegationToken);
  CancelDelegationTokenRequestProto proto=source.getProto();
  CancelDelegationTokenRequestPBImpl roundTripped=new CancelDelegationTokenRequestPBImpl(proto);
  assertNotNull(roundTripped.getDelegationToken());
  assertEquals(delegationToken,roundTripped.getDelegationToken());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * RenewDelegationTokenRequestPBImpl: converting to its protobuf form and
 * back must preserve the delegation token.
 */
@Test public void testRenewDelegationTokenRequestPBImpl(){
  Token delegationToken=getDelegationToken();
  // Populate the record, serialize to proto, then rebuild from the proto.
  RenewDelegationTokenRequestPBImpl source=new RenewDelegationTokenRequestPBImpl();
  source.setDelegationToken(delegationToken);
  RenewDelegationTokenRequestProto proto=source.getProto();
  RenewDelegationTokenRequestPBImpl roundTripped=new RenewDelegationTokenRequestPBImpl(proto);
  assertNotNull(roundTripped.getDelegationToken());
  assertEquals(delegationToken,roundTripped.getDelegationToken());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * ResourceRequest contract: two identically-built requests must be equal,
 * compare as 0 and share a hash code; diverging numContainers must break
 * equality, ordering and the hash code.
 */
@Test public void testResourceRequest(){
Resource resource=recordFactory.newRecordInstance(Resource.class);
Priority priority=recordFactory.newRecordInstance(Priority.class);
ResourceRequest original=ResourceRequest.newInstance(priority,"localhost",resource,2);
ResourceRequest copy=ResourceRequest.newInstance(priority,"localhost",resource,2);
assertTrue(original.equals(copy));
assertEquals(0,original.compareTo(copy));
assertTrue(original.hashCode() == copy.hashCode());
copy.setNumContainers(1);
assertFalse(original.equals(copy));
// Fixed: assertNotSame compared object identity of two autoboxed Integers,
// which only "worked" by accident of the Integer cache. Assert the value.
assertTrue(original.compareTo(copy) != 0);
assertFalse(original.hashCode() == copy.hashCode());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Drives NMClientAsync through 40 expected-success and 40 expected-failure
 * container starts (plus a second batch of 40 failures after swapping in a
 * failing mock NMClient), then verifies no callback/processor errors were
 * recorded and that the dispatcher thread and thread pool shut down cleanly.
 */
@Test(timeout=10000) public void testNMClientAsync() throws Exception {
Configuration conf=new Configuration();
conf.setInt(YarnConfiguration.NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE,10);
int expectedSuccess=40;
int expectedFailure=40;
asyncClient=new MockNMClientAsync1(expectedSuccess,expectedFailure);
asyncClient.init(conf);
Assert.assertEquals("The max thread pool size is not correctly set",10,asyncClient.maxThreadPoolSize);
asyncClient.start();
for (int i=0; i < expectedSuccess + expectedFailure; ++i) {
// After the success batch completes, swap in mock client #1 so the
// remaining starts exercise the failure callbacks.
if (i == expectedSuccess) {
while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isAllSuccessCallsExecuted()) {
Thread.sleep(10);
}
asyncClient.setClient(mockNMClient(1));
}
Container container=mockContainer(i);
ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class);
asyncClient.startContainerAsync(container,clc);
}
// Wait for the start/query failure callbacks before switching clients again.
while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStartAndQueryFailureCallsExecuted()) {
Thread.sleep(10);
}
asyncClient.setClient(mockNMClient(2));
((TestCallbackHandler1)asyncClient.getCallbackHandler()).path=false;
// Second batch: containers whose stop is expected to fail.
for (int i=0; i < expectedFailure; ++i) {
Container container=mockContainer(expectedSuccess + expectedFailure + i);
ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class);
asyncClient.startContainerAsync(container,clc);
}
while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStopFailureCallsExecuted()) {
Thread.sleep(10);
}
// Dump any recorded errors for diagnosis before asserting none occurred.
for ( String errorMsg : ((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs) {
System.out.println(errorMsg);
}
Assert.assertEquals("Error occurs in CallbackHandler",0,((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs.size());
for ( String errorMsg : ((MockNMClientAsync1)asyncClient).errorMsgs) {
System.out.println(errorMsg);
}
Assert.assertEquals("Error occurs in ContainerEventProcessor",0,((MockNMClientAsync1)asyncClient).errorMsgs.size());
// Wait for all tracked containers to drain, then stop and check teardown.
while (asyncClient.containers.size() > 0) {
Thread.sleep(10);
}
asyncClient.stop();
Assert.assertFalse("The thread of Container Management Event Dispatcher is still alive",asyncClient.eventDispatcherThread.isAlive());
Assert.assertTrue("The thread pool is not shut down",asyncClient.threadPool.isShutdown());
}
InternalCallVerifier BooleanVerifier
/**
 * Issues a stop for a container while its start is still blocked on a
 * barrier, and verifies the client tolerates the out-of-order sequence.
 */
@Test(timeout=10000) public void testOutOfOrder() throws Exception {
  CyclicBarrier barrierA=new CyclicBarrier(2);
  CyclicBarrier barrierB=new CyclicBarrier(2);
  CyclicBarrier barrierC=new CyclicBarrier(2);
  asyncClient=new MockNMClientAsync2(barrierA,barrierB,barrierC);
  asyncClient.init(new Configuration());
  asyncClient.start();
  final Container container=mockContainer(1);
  final ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // Kick off the start on a separate thread so it can block on barrierA.
  Thread starter=new Thread(new Runnable(){
    @Override public void run(){
      asyncClient.startContainerAsync(container,clc);
    }
  });
  starter.start();
  barrierA.await();
  // Start is now in flight; request the stop before it completes.
  asyncClient.stopContainerAsync(container.getId(),container.getNodeId());
  barrierC.await();
  Assert.assertFalse("Starting and stopping should be out of order",((TestCallbackHandler2)asyncClient.getCallbackHandler()).exceptionOccurred.get());
}
InternalCallVerifier EqualityVerifier
/** AHS client must return the first mock report for a known application id. */
@Test(timeout=10000) public void testGetApplicationReport() throws YarnException, IOException {
Configuration conf=new Configuration();
final AHSClient client=new MockAHSClient();
client.init(conf);
client.start();
List expectedReports=((MockAHSClient)client).getReports();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationReport report=client.getApplicationReport(applicationId);
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals(expectedReports.get(0),report);
Assert.assertEquals(expectedReports.get(0).getApplicationId().toString(),report.getApplicationId().toString());
client.stop();
}
InternalCallVerifier EqualityVerifier
/** AHS client must return all four mock application reports. */
@Test(timeout=10000) public void testGetApplications() throws YarnException, IOException {
Configuration conf=new Configuration();
final AHSClient client=new MockAHSClient();
client.init(conf);
client.start();
List expectedReports=((MockAHSClient)client).getReports();
List reports=client.getApplications();
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals(expectedReports,reports);
// A second fetch must return the same number of reports.
reports=client.getApplications();
Assert.assertEquals(4,reports.size());
client.stop();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** AHS client must list attempts 1 and 2 for the mock application. */
@Test(timeout=10000) public void testGetApplicationAttempts() throws YarnException, IOException {
Configuration conf=new Configuration();
final AHSClient client=new MockAHSClient();
client.init(conf);
client.start();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
List reports=client.getApplicationAttempts(applicationId);
Assert.assertNotNull(reports);
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId,1),reports.get(0).getApplicationAttemptId());
Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId,2),reports.get(1).getApplicationAttemptId());
client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** AHS client must return the attempt report matching the mock app's current attempt. */
@Test(timeout=10000) public void testGetApplicationAttempt() throws YarnException, IOException {
Configuration conf=new Configuration();
final AHSClient client=new MockAHSClient();
client.init(conf);
client.start();
List expectedReports=((MockAHSClient)client).getReports();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
ApplicationAttemptReport report=client.getApplicationAttemptReport(appAttemptId);
Assert.assertNotNull(report);
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals(expectedReports.get(0).getCurrentApplicationAttemptId().toString(),report.getApplicationAttemptId().toString());
client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** AHS client must return the container report matching the mock attempt's container 1. */
@Test(timeout=10000) public void testGetContainerReport() throws YarnException, IOException {
Configuration conf=new Configuration();
final AHSClient client=new MockAHSClient();
client.init(conf);
client.start();
List expectedReports=((MockAHSClient)client).getReports();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
ContainerReport report=client.getContainerReport(containerId);
Assert.assertNotNull(report);
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals((ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(),1)).toString(),report.getContainerId().toString());
client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** AHS client must list containers 1 and 2 for the mock application attempt. */
@Test(timeout=10000) public void testGetContainers() throws YarnException, IOException {
Configuration conf=new Configuration();
final AHSClient client=new MockAHSClient();
client.init(conf);
client.start();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
List reports=client.getContainers(appAttemptId);
Assert.assertNotNull(reports);
// Fixed argument order: JUnit assertEquals is (expected, actual).
Assert.assertEquals((ContainerId.newInstance(appAttemptId,1)),reports.get(0).getContainerId());
Assert.assertEquals((ContainerId.newInstance(appAttemptId,2)),reports.get(1).getContainerId());
client.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Matching-fit logic of AMRMClient.getMatchingRequests: stored container
 * requests must be returned for capabilities they fit, honor removals, and
 * respect the relax-locality flag (storedContainer7).
 */
@Test(timeout=60000) public void testAMRMClientMatchingFit() throws YarnException, IOException {
AMRMClient amClient=null;
try {
amClient=AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
// A spread of capabilities: some dominate others in memory, cores, or both.
Resource capability1=Resource.newInstance(1024,2);
Resource capability2=Resource.newInstance(1024,1);
Resource capability3=Resource.newInstance(1000,2);
Resource capability4=Resource.newInstance(2000,1);
Resource capability5=Resource.newInstance(1000,3);
Resource capability6=Resource.newInstance(2000,1);
Resource capability7=Resource.newInstance(2000,1);
ContainerRequest storedContainer1=new ContainerRequest(capability1,nodes,racks,priority);
ContainerRequest storedContainer2=new ContainerRequest(capability2,nodes,racks,priority);
ContainerRequest storedContainer3=new ContainerRequest(capability3,nodes,racks,priority);
ContainerRequest storedContainer4=new ContainerRequest(capability4,nodes,racks,priority);
ContainerRequest storedContainer5=new ContainerRequest(capability5,nodes,racks,priority);
ContainerRequest storedContainer6=new ContainerRequest(capability6,nodes,racks,priority);
// Different priority, relax-locality disabled: only matches on its node.
ContainerRequest storedContainer7=new ContainerRequest(capability7,nodes,racks,priority2,false);
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer2);
amClient.addContainerRequest(storedContainer3);
amClient.addContainerRequest(storedContainer4);
amClient.addContainerRequest(storedContainer5);
amClient.addContainerRequest(storedContainer6);
amClient.addContainerRequest(storedContainer7);
List extends Collection> matches;
ContainerRequest storedRequest;
// Exact fit: only storedContainer1 matches (1024, 2).
Resource testCapability1=Resource.newInstance(1024,2);
matches=amClient.getMatchingRequests(priority,node,testCapability1);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
// (2000, 1) matches the two identical big-memory requests, 4 and 6.
Resource testCapability2=Resource.newInstance(2000,1);
matches=amClient.getMatchingRequests(priority,node,testCapability2);
verifyMatches(matches,2);
int i=0;
for ( ContainerRequest storedRequest1 : matches.get(0)) {
if (i++ == 0) {
assertEquals(storedContainer4,storedRequest1);
}
else {
assertEquals(storedContainer6,storedRequest1);
}
}
amClient.removeContainerRequest(storedContainer6);
// Converted bare `assert` statements to JUnit assertTrue: `assert` is
// silently skipped unless the JVM runs with -ea, so these checks never
// actually ran under a default test JVM.
Resource testCapability3=Resource.newInstance(4000,4);
matches=amClient.getMatchingRequests(priority,node,testCapability3);
assertTrue(matches.size() == 4);
Resource testCapability4=Resource.newInstance(1024,2);
matches=amClient.getMatchingRequests(priority,node,testCapability4);
assertTrue(matches.size() == 2);
for ( Collection testSet : matches) {
assertEquals(1,testSet.size());
ContainerRequest testRequest=testSet.iterator().next();
assertTrue(testRequest != storedContainer4);
assertTrue(testRequest != storedContainer5);
assertTrue(testRequest == storedContainer2 || testRequest == storedContainer3);
}
// Too small to fit anything.
Resource testCapability5=Resource.newInstance(512,4);
matches=amClient.getMatchingRequests(priority,node,testCapability5);
assertTrue(matches.size() == 0);
// Relax-locality off: storedContainer7 must not match ANY, only its node.
Resource testCapability7=Resource.newInstance(2000,1);
matches=amClient.getMatchingRequests(priority2,ResourceRequest.ANY,testCapability7);
assertTrue(matches.size() == 0);
matches=amClient.getMatchingRequests(priority2,node,testCapability7);
assertTrue(matches.size() == 1);
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
InternalCallVerifier IdentityVerifier
/**
 * End-to-end AMRMClient smoke test: register, run the shared allocation
 * scenario, unregister. Also verifies a per-client NMTokenCache is distinct
 * from the process-wide singleton.
 */
@Test(timeout=60000) public void testAMRMClient() throws YarnException, IOException {
AMRMClient amClient=null;
try {
amClient=AMRMClient.createAMRMClient();
// A freshly-set cache must not be the shared singleton instance.
amClient.setNMTokenCache(new NMTokenCache());
Assert.assertNotSame(NMTokenCache.getSingleton(),amClient.getNMTokenCache());
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
testAllocation((AMRMClientImpl)amClient);
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
// Stop only if start() succeeded; init/start failures leave it un-started.
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Blacklisting during allocation: a blacklisted node must receive no
 * containers, removing it from the blacklist must restore allocation, and an
 * invalid resource ask must surface as an exception while still recording
 * the pending blacklist addition.
 */
@Test(timeout=60000) public void testAllocationWithBlacklist() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
assertEquals(0,amClient.ask.size());
assertEquals(0,amClient.release.size());
// One request expands to node + rack + ANY, hence 3 asks.
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority);
amClient.addContainerRequest(storedContainer1);
assertEquals(3,amClient.ask.size());
assertEquals(0,amClient.release.size());
// Blacklist the only node: no allocation should happen.
List localNodeBlacklist=new ArrayList();
localNodeBlacklist.add(node);
amClient.updateBlacklist(localNodeBlacklist,null);
int allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION);
assertEquals(0,allocatedContainerCount);
// Un-blacklist and re-request: both outstanding requests get containers.
amClient.updateBlacklist(null,localNodeBlacklist);
ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority);
amClient.addContainerRequest(storedContainer2);
allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION);
assertEquals(2,allocatedContainerCount);
assertTrue(amClient.blacklistAdditions.isEmpty());
assertTrue(amClient.blacklistRemovals.isEmpty());
// Negative memory is invalid; allocate() must throw, but the blacklist
// addition queued beforehand must still be recorded.
ContainerRequest invalidContainerRequest=new ContainerRequest(Resource.newInstance(-1024,1),nodes,racks,priority);
amClient.addContainerRequest(invalidContainerRequest);
amClient.updateBlacklist(localNodeBlacklist,null);
try {
amClient.allocate(0.1f);
fail("there should be an exception here.");
}
catch ( Exception e) {
assertEquals(1,amClient.blacklistAdditions.size());
}
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Bookkeeping of pending blacklist additions/removals across successive
 * updateBlacklist calls: additions accumulate without duplicates, and a
 * removal cancels a pending addition for the same node (and vice versa).
 */
@Test(timeout=60000) public void testAMRMClientWithBlacklist() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
String[] nodes={"node1","node2","node3"};
// Add node1, node2 -> 2 pending additions.
List nodeList01=new ArrayList();
nodeList01.add(nodes[0]);
nodeList01.add(nodes[1]);
amClient.updateBlacklist(nodeList01,null);
assertEquals(2,amClient.blacklistAdditions.size());
assertEquals(0,amClient.blacklistRemovals.size());
// Add node1 (duplicate) and node3 -> 3 pending additions total.
List nodeList02=new ArrayList();
nodeList02.add(nodes[0]);
nodeList02.add(nodes[2]);
amClient.updateBlacklist(nodeList02,null);
assertEquals(3,amClient.blacklistAdditions.size());
assertEquals(0,amClient.blacklistRemovals.size());
// Remove node2, node3: cancels their pending additions, leaving node1,
// and records 2 pending removals.
List nodeList12=new ArrayList();
nodeList12.add(nodes[1]);
nodeList12.add(nodes[2]);
amClient.updateBlacklist(null,nodeList12);
assertEquals(1,amClient.blacklistAdditions.size());
assertEquals(2,amClient.blacklistRemovals.size());
// Re-add node2: back to 2 additions, and its pending removal is cancelled.
List nodeList1=new ArrayList();
nodeList1.add(nodes[1]);
amClient.updateBlacklist(nodeList1,null);
assertEquals(2,amClient.blacklistAdditions.size());
assertEquals(1,amClient.blacklistRemovals.size());
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * AMRMToken roll-over: after the master key rolls, the client must pick up a
 * new token matching the current master key, and the old token must be
 * rejected by the ApplicationMasterService once the roll completes.
 */
@Test(timeout=60000) public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException {
AMRMClient amClient=null;
try {
AMRMTokenSecretManager amrmTokenSecretManager=yarnCluster.getResourceManager().getRMContext().getAMRMTokenSecretManager();
amClient=AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
Long startTime=System.currentTimeMillis();
amClient.registerApplicationMaster("Host",10000,"");
// Initial token must carry the current master key id.
org.apache.hadoop.security.token.Token amrmToken_1=getAMRMToken();
Assert.assertNotNull(amrmToken_1);
Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
// Keep heart-beating until the configured rolling interval has elapsed.
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
amClient.allocate(0.1f);
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
e.printStackTrace();
}
}
amClient.allocate(0.1f);
// The client must now hold a different token bound to the new master key.
org.apache.hadoop.security.token.Token amrmToken_2=getAMRMToken();
Assert.assertNotNull(amrmToken_2);
Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId());
Assert.assertNotEquals(amrmToken_1,amrmToken_2);
amClient.allocate(0.1f);
// Heart-beat until the roll-over fully completes: the new token's key is
// no longer the current key and no next key is pending.
while (true) {
if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getCurrnetMasterKeyData().getMasterKey().getKeyId()) {
if (amrmTokenSecretManager.getNextMasterKeyData() == null) {
break;
}
else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey().getKeyId()) {
break;
}
}
amClient.allocate(0.1f);
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
// Using the superseded token directly against the AM service must fail
// with InvalidToken.
try {
UserGroupInformation testUser=UserGroupInformation.createRemoteUser("testUser");
SecurityUtil.setTokenService(amrmToken_2,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress());
testUser.addToken(amrmToken_2);
testUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(),conf);
}
}
).allocate(Records.newRecord(AllocateRequest.class));
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
Assert.assertTrue(ex instanceof InvalidToken);
Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId()));
}
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When a container request names nodes but no racks, the client must infer
 * the rack: matching succeeds by node AND by the inferred rack, and removal
 * clears both match paths.
 */
@Test(timeout=60000) public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=new AMRMClientImpl();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Resource capability=Resource.newInstance(1024,2);
// racks == null: the rack must be inferred from the node list.
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,null,priority);
amClient.addContainerRequest(storedContainer1);
List extends Collection> matches;
ContainerRequest storedRequest;
// Matches by explicit node...
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
// ...and by the inferred rack.
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,1);
storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
// Removal must clear the inferred-rack entry too.
amClient.removeContainerRequest(storedContainer1);
matches=amClient.getMatchingRequests(priority,rack,capability);
assertTrue(matches.isEmpty());
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Exercises AMRMClient's request-matching storage: requests added at two
// priorities are looked up by node/rack/ANY, removed one by one, and then
// re-added and matched against containers actually allocated by the RM.
@Test public void testAMRMClientMatchStorage() throws YarnException, IOException {
AMRMClientImpl amClient=null;
try {
amClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
Priority priority1=Records.newRecord(Priority.class);
priority1.setPriority(2);
// Two identical requests at the fixture priority, one at priority1 with
// no locality (ANY only).
ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority);
ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority);
ContainerRequest storedContainer3=new ContainerRequest(capability,null,null,priority1);
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer2);
amClient.addContainerRequest(storedContainer3);
// The remote request table should aggregate per (priority, ANY, capability).
int containersRequestedAny=amClient.remoteRequestsTable.get(priority).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
assertEquals(2,containersRequestedAny);
containersRequestedAny=amClient.remoteRequestsTable.get(priority1).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers();
assertEquals(1,containersRequestedAny);
// Both priority-level requests match by node, rack and ANY.
List extends Collection> matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,2);
matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability);
verifyMatches(matches,2);
// The locality-free request at priority1 only matches ANY, not a rack.
matches=amClient.getMatchingRequests(priority1,rack,capability);
assertTrue(matches.isEmpty());
matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability);
verifyMatches(matches,1);
// Removing requests shrinks the match counts accordingly.
amClient.removeContainerRequest(storedContainer3);
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,2);
amClient.removeContainerRequest(storedContainer2);
matches=amClient.getMatchingRequests(priority,node,capability);
verifyMatches(matches,1);
matches=amClient.getMatchingRequests(priority,rack,capability);
verifyMatches(matches,1);
ContainerRequest storedRequest=matches.get(0).iterator().next();
assertEquals(storedContainer1,storedRequest);
amClient.removeContainerRequest(storedContainer1);
// With everything removed the table must be completely empty.
matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability);
assertTrue(matches.isEmpty());
matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability);
assertTrue(matches.isEmpty());
assertTrue(amClient.remoteRequestsTable.isEmpty());
// Re-add one request per priority and drive real allocations from the RM.
amClient.addContainerRequest(storedContainer1);
amClient.addContainerRequest(storedContainer3);
int allocatedContainerCount=0;
int iterationsLeft=3;
// Poll allocate() until both containers arrive or we run out of tries.
while (allocatedContainerCount < 2 && iterationsLeft-- > 0) {
Log.info(" == alloc " + allocatedContainerCount + " it left "+ iterationsLeft);
AllocateResponse allocResponse=amClient.allocate(0.1f);
assertEquals(0,amClient.ask.size());
assertEquals(0,amClient.release.size());
assertEquals(nodeCount,amClient.getClusterNodeCount());
allocatedContainerCount+=allocResponse.getAllocatedContainers().size();
for ( Container container : allocResponse.getAllocatedContainers()) {
// Each allocated container must match exactly the request stored at
// its priority; consume the match and release the container.
ContainerRequest expectedRequest=container.getPriority().equals(storedContainer1.getPriority()) ? storedContainer1 : storedContainer3;
matches=amClient.getMatchingRequests(container.getPriority(),ResourceRequest.ANY,container.getResource());
verifyMatches(matches,1);
ContainerRequest matchedRequest=matches.get(0).iterator().next();
assertEquals(matchedRequest,expectedRequest);
amClient.removeContainerRequest(matchedRequest);
amClient.releaseAssignedContainer(container.getId());
}
if (allocatedContainerCount < containersRequestedAny) {
sleep(100);
}
}
assertEquals(2,allocatedContainerCount);
// A final allocate must find nothing pending: no asks, no releases,
// no new containers, and an empty request table.
AllocateResponse allocResponse=amClient.allocate(0.1f);
assertEquals(0,amClient.release.size());
assertEquals(0,amClient.ask.size());
assertEquals(0,allocResponse.getAllocatedContainers().size());
assertTrue(amClient.remoteRequestsTable.isEmpty());
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
}
finally {
// Stop the client even if an assertion above failed.
if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
amClient.stop();
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// Verifies AMRM token master-key roll-over: after the rolling interval the
// client must transparently pick up the new token, the old token must be
// rejected by a restarted RM, and allocate() keeps working throughout.
// NOTE: getCurrnetMasterKeyData is the actual (misspelled) Hadoop API name.
@Test(timeout=30000) public void testAMRMClientOnAMRMTokenRollOverOnRMRestart() throws Exception {
conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,rolling_interval_sec);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,am_expire_ms);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager2 rm1=new MyResourceManager2(conf,memStore);
rm1.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher();
Long startTime=System.currentTimeMillis();
// Submit an app, bring up a NM, and launch the AM attempt.
RMApp app=rm1.submitApp(1024);
dispatcher.await();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
AMRMTokenSecretManager amrmTokenSecretManagerForRM1=rm1.getRMContext().getAMRMTokenSecretManager();
org.apache.hadoop.security.token.Token token=amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
ugi.addTokenIdentifier(token.decodeIdentifier());
AMRMClient amClient=new MyAMRMClientImpl(rm1);
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("h1",10000,"");
amClient.allocate(0.1f);
// Keep heartbeating until the master-key rolling interval has elapsed.
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
amClient.allocate(0.1f);
try {
Thread.sleep(1000);
}
catch ( InterruptedException e) {
}
}
// The master key must have rolled past the one baked into the old token.
Assert.assertTrue(amrmTokenSecretManagerForRM1.getMasterKey().getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId());
amClient.allocate(0.1f);
org.apache.hadoop.security.token.Token newToken=amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
int waitCount=0;
// Poll (bounded) until the current master key differs from the old token's.
while (waitCount++ <= 50) {
if (amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId()) {
break;
}
try {
amClient.allocate(0.1f);
}
catch ( Exception ex) {
break;
}
Thread.sleep(500);
}
// Roll-over finished: no pending next key, current key matches new token.
Assert.assertTrue(amrmTokenSecretManagerForRM1.getNextMasterKeyData() == null);
Assert.assertTrue(amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
// Restart the RM from the same state store on a different scheduler port.
conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS,"0.0.0.0:9030");
final MyResourceManager2 rm2=new MyResourceManager2(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
((MyAMRMClientImpl)amClient).updateRMProxy(rm2);
dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher();
// The recovered RM must come up already holding the rolled-over key.
AMRMTokenSecretManager amrmTokenSecretManagerForRM2=rm2.getRMContext().getAMRMTokenSecretManager();
Assert.assertTrue(amrmTokenSecretManagerForRM2.getCurrnetMasterKeyData().getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
Assert.assertTrue(amrmTokenSecretManagerForRM2.getNextMasterKeyData() == null);
try {
// A direct allocate with the OLD token against rm2 must be rejected.
UserGroupInformation testUser=UserGroupInformation.createRemoteUser("testUser");
SecurityUtil.setTokenService(token,rm2.getApplicationMasterService().getBindAddress());
testUser.addToken(token);
testUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class,rm2.getApplicationMasterService().getBindAddress(),conf);
}
}
).allocate(Records.newRecord(AllocateRequest.class));
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
Assert.assertTrue(ex instanceof InvalidToken);
Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + token.decodeIdentifier().getApplicationAttemptId()));
}
// The client with the new token still works against the restarted RM.
amClient.allocate(0.1f);
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
amClient.stop();
rm1.stop();
rm2.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that after an RM restart the AMRMClient re-sends its pending
// container asks, releases and blacklist entries to the new RM, and that
// outstanding requests are eventually satisfied by the recovered cluster.
// Fix: the assertion message at the "2 assignments" check previously said
// "must be 0", which produced a misleading failure message.
@Test(timeout=60000) public void testAMRMClientResendsRequestsOnRMRestart() throws Exception {
UserGroupInformation.setLoginUser(null);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager rm1=new MyResourceManager(conf,memStore);
rm1.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher();
// Submit an app, register a NM, and launch the AM attempt on rm1.
RMApp app=rm1.submitApp(1024);
dispatcher.await();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
org.apache.hadoop.security.token.Token token=rm1.getRMContext().getRMApps().get(appAttemptId.getApplicationId()).getRMAppAttempt(appAttemptId).getAMRMToken();
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
ugi.addTokenIdentifier(token.decodeIdentifier());
AMRMClient amClient=new MyAMRMClientImpl(rm1);
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("Host",10000,"");
// Two host-local requests plus one blacklist add/remove pair.
ContainerRequest cRequest1=createReq(1,1024,new String[]{"h1"});
amClient.addContainerRequest(cRequest1);
ContainerRequest cRequest2=createReq(1,1024,new String[]{"h1","h2"});
amClient.addContainerRequest(cRequest2);
List blacklistAdditions=new ArrayList();
List blacklistRemoval=new ArrayList();
blacklistAdditions.add("h2");
blacklistRemoval.add("h10");
amClient.updateBlacklist(blacklistAdditions,blacklistRemoval);
blacklistAdditions.remove("h2");
// First allocate pushes the asks and blacklist updates; nothing assigned yet.
AllocateResponse allocateResponse=amClient.allocate(0.1f);
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size());
assertAsksAndReleases(4,0,rm1);
assertBlacklistAdditionsAndRemovals(1,1,rm1);
// A NM heartbeat lets the scheduler assign both containers.
nm1.nodeHeartbeat(true);
dispatcher.await();
allocateResponse=amClient.allocate(0.2f);
dispatcher.await();
Assert.assertEquals("No of assignments must be 2",2,allocateResponse.getAllocatedContainers().size());
assertAsksAndReleases(0,0,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
List allocatedContainers=allocateResponse.getAllocatedContainers();
// Remove the satisfied requests; the decrement is sent as new asks.
amClient.removeContainerRequest(cRequest1);
amClient.removeContainerRequest(cRequest2);
allocateResponse=amClient.allocate(0.2f);
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size());
assertAsksAndReleases(4,0,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
// Add one more request and release one allocated container before restart.
ContainerRequest cRequest3=createReq(1,1024,new String[]{"h1"});
amClient.addContainerRequest(cRequest3);
int pendingRelease=0;
Iterator it=allocatedContainers.iterator();
while (it.hasNext()) {
amClient.releaseAssignedContainer(it.next().getId());
pendingRelease++;
it.remove();
break;
}
allocateResponse=amClient.allocate(0.3f);
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size());
assertAsksAndReleases(3,pendingRelease,rm1);
assertBlacklistAdditionsAndRemovals(0,0,rm1);
int completedContainer=allocateResponse.getCompletedContainersStatuses().size();
pendingRelease-=completedContainer;
// Restart the RM from the same state store; old NM must be told to RESYNC.
MyResourceManager rm2=new MyResourceManager(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
((MyAMRMClientImpl)amClient).updateRMProxy(rm2);
dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher();
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
// Queue more blacklist changes and releases while pointing at rm2.
blacklistAdditions.add("h3");
amClient.updateBlacklist(blacklistAdditions,null);
blacklistAdditions.remove("h3");
it=allocatedContainers.iterator();
while (it.hasNext()) {
amClient.releaseAssignedContainer(it.next().getId());
pendingRelease++;
it.remove();
}
ContainerRequest cRequest4=createReq(1,1024,new String[]{"h1","h2"});
amClient.addContainerRequest(cRequest4);
// First allocate after restart must re-send pending asks, releases and
// the full blacklist to the new RM.
allocateResponse=amClient.allocate(0.3f);
dispatcher.await();
completedContainer=allocateResponse.getCompletedContainersStatuses().size();
pendingRelease-=completedContainer;
assertAsksAndReleases(4,pendingRelease,rm2);
assertBlacklistAdditionsAndRemovals(2,0,rm2);
ContainerRequest cRequest5=createReq(1,1024,new String[]{"h1","h2","h3"});
amClient.addContainerRequest(cRequest5);
allocateResponse=amClient.allocate(0.5f);
dispatcher.await();
Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size());
assertAsksAndReleases(5,0,rm2);
assertBlacklistAdditionsAndRemovals(0,0,rm2);
// Heartbeat-and-allocate (bounded retries) until the three outstanding
// requests (cRequest3, cRequest4, cRequest5) are all satisfied by rm2.
int noAssignedContainer=0;
int count=5;
while (count-- > 0) {
nm1.nodeHeartbeat(true);
dispatcher.await();
allocateResponse=amClient.allocate(0.5f);
dispatcher.await();
noAssignedContainer+=allocateResponse.getAllocatedContainers().size();
if (noAssignedContainer == 3) {
break;
}
Thread.sleep(1000);
}
Assert.assertEquals("Number of container should be 3",3,noAssignedContainer);
amClient.stop();
rm1.stop();
rm2.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that an AM can successfully unregister against a restarted RM:
// after rm2 recovers the app (with a running container reported by the NM),
// unregisterApplicationMaster must drive the attempt through FINISHING to
// FINISHED and the app to FINISHED.
@Test(timeout=60000) public void testAMRMClientForUnregisterAMOnRMRestart() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MyResourceManager rm1=new MyResourceManager(conf,memStore);
rm1.start();
DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher();
// Submit an app, register a NM, and launch the AM attempt on rm1.
RMApp app=rm1.submitApp(1024);
dispatcher.await();
MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId();
rm1.sendAMLaunched(appAttemptId);
dispatcher.await();
org.apache.hadoop.security.token.Token token=rm1.getRMContext().getRMApps().get(appAttemptId.getApplicationId()).getRMAppAttempt(appAttemptId).getAMRMToken();
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
ugi.addTokenIdentifier(token.decodeIdentifier());
AMRMClient amClient=new MyAMRMClientImpl(rm1);
amClient.init(conf);
amClient.start();
amClient.registerApplicationMaster("h1",10000,"");
amClient.allocate(0.1f);
// Restart the RM from the same state store and repoint NM and client.
MyResourceManager rm2=new MyResourceManager(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
((MyAMRMClientImpl)amClient).updateRMProxy(rm2);
dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher();
// Old NM registration is stale: rm2 orders a RESYNC.
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
// Re-register the NM, reporting the AM container as still RUNNING so the
// recovered attempt stays alive.
nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService());
ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
NMContainerStatus containerReport=NMContainerStatus.newInstance(containerId,ContainerState.RUNNING,Resource.newInstance(1024,1),"recover container",0,Priority.newInstance(0),0);
nm1.registerNode(Arrays.asList(containerReport),null);
nm1.nodeHeartbeat(true);
dispatcher.await();
// Unregister against rm2 and walk the attempt/app to their final states.
amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
rm2.waitForState(appAttemptId,RMAppAttemptState.FINISHING);
nm1.nodeHeartbeat(appAttemptId,1,ContainerState.COMPLETE);
rm2.waitForState(appAttemptId,RMAppAttemptState.FINISHED);
rm2.waitForState(app.getApplicationId(),RMAppState.FINISHED);
amClient.stop();
rm1.stop();
rm2.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that stopping the NMClient with cleanup disabled leaves started
// containers tracked, and that an explicit cleanupRunningContainers() call
// then empties the tracking set.
@Test(timeout=180000) public void testNMClientNoCleanupOnStop() throws YarnException, IOException {
rmClient.registerApplicationMaster("Host",10000,"");
testContainerManagement(nmClient,allocateContainers(rmClient,5));
rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
// Stop without cleanup: containers must still be tracked afterwards.
stopNmClient(false);
assertFalse(nmClient.startedContainers.isEmpty());
// Manual cleanup must clear the tracked containers.
nmClient.cleanupRunningContainers();
assertEquals(0,nmClient.startedContainers.size());
}
BranchVerifier TestInitializer UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Per-test fixture: starts a MiniYARNCluster, submits an unmanaged AM app,
// waits for the attempt to launch, installs its AMRM token on the current
// user, and creates the AMRMClient/NMClient pair used by the tests.
// Fix: corrected the typo in the failure message ("bee" -> "been").
@Before public void setup() throws YarnException, IOException {
conf=new YarnConfiguration();
yarnCluster=new MiniYARNCluster(TestAMRMClient.class.getName(),nodeCount,1,1);
yarnCluster.init(conf);
yarnCluster.start();
assertNotNull(yarnCluster);
assertEquals(STATE.STARTED,yarnCluster.getServiceState());
// YarnClient is used only to submit and monitor the test application.
yarnClient=(YarnClientImpl)YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
assertNotNull(yarnClient);
assertEquals(STATE.STARTED,yarnClient.getServiceState());
nodeReports=yarnClient.getNodeReports(NodeState.RUNNING);
// Build an unmanaged-AM submission context so the test itself plays the AM.
ApplicationSubmissionContext appContext=yarnClient.createApplication().getApplicationSubmissionContext();
ApplicationId appId=appContext.getApplicationId();
appContext.setApplicationName("Test");
Priority pri=Priority.newInstance(0);
appContext.setPriority(pri);
appContext.setQueue("default");
ContainerLaunchContext amContainer=Records.newRecord(ContainerLaunchContext.class);
appContext.setAMContainerSpec(amContainer);
appContext.setUnmanagedAM(true);
SubmitApplicationRequest appRequest=Records.newRecord(SubmitApplicationRequest.class);
appRequest.setApplicationSubmissionContext(appContext);
yarnClient.submitApplication(appContext);
// Poll (up to 30s) until the app is ACCEPTED, then spin until the attempt
// reaches LAUNCHED. NOTE(review): the inner loop busy-waits without a
// sleep and without a bound; relies on the cluster making progress.
int iterationsLeft=30;
RMAppAttempt appAttempt=null;
while (iterationsLeft > 0) {
ApplicationReport appReport=yarnClient.getApplicationReport(appId);
if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
attemptId=appReport.getCurrentApplicationAttemptId();
appAttempt=yarnCluster.getResourceManager().getRMContext().getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt();
while (true) {
if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
break;
}
}
break;
}
sleep(1000);
--iterationsLeft;
}
if (iterationsLeft == 0) {
fail("Application hasn't been started");
}
// Act as the AM: install the attempt's AMRM token on the login user.
UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
// AMRMClient and NMClient share one NMTokenCache.
nmTokenCache=new NMTokenCache();
rmClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
rmClient.setNMTokenCache(nmTokenCache);
rmClient.init(conf);
rmClient.start();
assertNotNull(rmClient);
assertEquals(STATE.STARTED,rmClient.getServiceState());
nmClient=(NMClientImpl)NMClient.createNMClient();
nmClient.setNMTokenCache(rmClient.getNMTokenCache());
nmClient.init(conf);
nmClient.start();
assertNotNull(nmClient);
assertEquals(STATE.STARTED,nmClient.getServiceState());
}
InternalCallVerifier BooleanVerifier
// Verifies the default NMClient shutdown path: with cleanup-on-stop enabled,
// stop() is expected to clean up the containers that are still tracked.
@Test(timeout=200000) public void testNMClient() throws YarnException, IOException {
rmClient.registerApplicationMaster("Host",10000,"");
testContainerManagement(nmClient,allocateContainers(rmClient,5));
rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
// Containers started by the test must still be tracked before stop().
assertFalse(nmClient.startedContainers.isEmpty());
// Explicitly (re-)enable cleanup-on-stop and confirm the flag took effect.
nmClient.cleanupRunningContainersOnStop(true);
assertTrue(nmClient.getCleanupRunningContainers().get());
nmClient.stop();
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that TimelineAuthenticator#hasDelegationToken detects the
 * presence of a "delegation" query parameter in a URL, whether it is the
 * only parameter or combined with others.
 */
@Test public void testHasDelegationTokens() throws Exception {
  TimelineAuthenticator authenticator = new TimelineAuthenticator();
  // URLs with no delegation parameter must report false.
  for (String plain : new String[] {
      "http://localhost:8/resource",
      "http://localhost:8/resource?other=xxxx"}) {
    Assert.assertFalse(authenticator.hasDelegationToken(new URL(plain)));
  }
  // URLs carrying delegation=... must report true.
  for (String withToken : new String[] {
      "http://localhost:8/resource?delegation=yyyy",
      "http://localhost:8/resource?other=xxxx&delegation=yyyy"}) {
    Assert.assertTrue(authenticator.hasDelegationToken(new URL(withToken)));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Happy path: posting an entity against a mocked OK response yields a
// TimelinePutResponse with no errors and must not throw.
@Test public void testPostEntities() throws Exception {
mockClientResponse(client,ClientResponse.Status.OK,false,false);
try {
TimelinePutResponse response=client.putEntities(generateEntity());
Assert.assertEquals(0,response.getErrors().size());
}
catch ( YarnException e) {
Assert.fail("Exception is not expected");
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Error path: the mocked response reports one per-entity error; putEntities
// must surface it in the response (id, type, IO_EXCEPTION code) rather than
// throwing.
@Test public void testPostEntitiesWithError() throws Exception {
mockClientResponse(client,ClientResponse.Status.OK,true,false);
try {
TimelinePutResponse response=client.putEntities(generateEntity());
Assert.assertEquals(1,response.getErrors().size());
Assert.assertEquals("test entity id",response.getErrors().get(0).getEntityId());
Assert.assertEquals("test entity type",response.getErrors().get(0).getEntityType());
Assert.assertEquals(TimelinePutResponse.TimelinePutError.IO_EXCEPTION,response.getErrors().get(0).getErrorCode());
}
catch ( YarnException e) {
Assert.fail("Exception is not expected");
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// With TIMELINE_SERVICE_ENABLED unset (default), putEntities should return
// an empty response without ever contacting the server — the mocked 500
// response must therefore never surface.
@Test public void testPostEntitiesTimelineServiceDefaultNotEnabled() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.unset(YarnConfiguration.TIMELINE_SERVICE_ENABLED);
TimelineClientImpl client=createTimelineClient(conf);
mockClientResponse(client,ClientResponse.Status.INTERNAL_SERVER_ERROR,false,false);
try {
TimelinePutResponse response=client.putEntities(generateEntity());
Assert.assertEquals(0,response.getErrors().size());
}
catch ( YarnException e) {
Assert.fail("putEntities should already return before throwing the exception");
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Same as the default-unset case, but with the timeline service explicitly
// disabled: putEntities must short-circuit with an empty response and never
// hit the mocked 500.
@Test public void testPostEntitiesTimelineServiceNotEnabled() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,false);
TimelineClientImpl client=createTimelineClient(conf);
mockClientResponse(client,ClientResponse.Status.INTERNAL_SERVER_ERROR,false,false);
try {
TimelinePutResponse response=client.putEntities(generateEntity());
Assert.assertEquals(0,response.getErrors().size());
}
catch ( YarnException e) {
Assert.fail("putEntities should already return before throwing the exception");
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies YarnClient#getApplicationAttempts against the fixed attempt list
// exposed by MockYarnClient.
// Fix: assertEquals arguments were reversed (actual, expected); JUnit
// expects (expected, actual), otherwise failure messages are inverted.
@Test(timeout=10000) public void testGetApplicationAttempts() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  List reports = client.getApplicationAttempts(applicationId);
  Assert.assertNotNull(reports);
  // Mock client returns attempts 1 and 2 for the requested application.
  Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 1),
      reports.get(0).getApplicationAttemptId());
  Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 2),
      reports.get(1).getApplicationAttemptId());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies YarnClient#getContainers against the fixed container list
// exposed by MockYarnClient.
// Fix: assertEquals arguments were reversed (actual, expected); JUnit
// expects (expected, actual), otherwise failure messages are inverted.
@Test(timeout=10000) public void testGetContainers() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  List reports = client.getContainers(appAttemptId);
  Assert.assertNotNull(reports);
  // Mock client returns containers 1 and 2 for the requested attempt.
  Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1),
      reports.get(0).getContainerId());
  Assert.assertEquals(ContainerId.newInstance(appAttemptId, 2),
      reports.get(1).getContainerId());
  client.stop();
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier HybridVerifier
// Verifies AMRM-token visibility via YarnClient#getAMRMToken:
// - null before the attempt is launched (createApp(..., false)),
// - non-null (eventually) once launched (createApp(..., true)),
// - null when queried by a different user ("foo") for someone else's app.
@Test(timeout=30000) public void testAMMRTokens() throws Exception {
MiniYARNCluster cluster=new MiniYARNCluster("testMRAMTokens",1,1,1);
YarnClient rmClient=null;
try {
cluster.init(new YarnConfiguration());
cluster.start();
final Configuration yarnConf=cluster.getConfig();
rmClient=YarnClient.createYarnClient();
rmClient.init(yarnConf);
rmClient.start();
// App whose AM is not launched: token must be null.
ApplicationId appId=createApp(rmClient,false);
waitTillAccepted(rmClient,appId);
Assert.assertNull(rmClient.getAMRMToken(appId));
// Launched app: poll up to 20s for the token to become available.
appId=createApp(rmClient,true);
waitTillAccepted(rmClient,appId);
long start=System.currentTimeMillis();
while (rmClient.getAMRMToken(appId) == null) {
if (System.currentTimeMillis() - start > 20 * 1000) {
Assert.fail("AMRM token is null");
}
Thread.sleep(100);
}
Assert.assertNotNull(rmClient.getAMRMToken(appId));
// Repeat as a different user; that user sees its own app's token...
UserGroupInformation other=UserGroupInformation.createUserForTesting("foo",new String[]{});
appId=other.doAs(new PrivilegedExceptionAction(){
@Override public ApplicationId run() throws Exception {
YarnClient rmClient=YarnClient.createYarnClient();
rmClient.init(yarnConf);
rmClient.start();
ApplicationId appId=createApp(rmClient,true);
waitTillAccepted(rmClient,appId);
long start=System.currentTimeMillis();
while (rmClient.getAMRMToken(appId) == null) {
if (System.currentTimeMillis() - start > 20 * 1000) {
Assert.fail("AMRM token is null");
}
Thread.sleep(100);
}
Assert.assertNotNull(rmClient.getAMRMToken(appId));
return appId;
}
}
);
// ...but the original user must not see foo's token.
Assert.assertNull(rmClient.getAMRMToken(appId));
}
finally {
// Tear down client and cluster even on failure.
if (rmClient != null) {
rmClient.stop();
}
cluster.stop();
}
}
InternalCallVerifier EqualityVerifier
// Verifies the application-type attribute: apps submitted without a type
// default to "YARN", while an explicit type ("MAPREDUCE") is preserved.
@Test(timeout=30000) public void testApplicationType() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
MockRM rm=new MockRM();
rm.start();
// No type given: expect the "YARN" default.
RMApp app=rm.submitApp(2000);
// Explicit type supplied as the last submitApp argument.
RMApp app1=rm.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE");
Assert.assertEquals("YARN",app.getApplicationType());
Assert.assertEquals("MAPREDUCE",app1.getApplicationType());
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies YarnClient#getApplications filtering: no filter, by application
// type, by application state, and by both combined, against the fixed
// report set exposed by MockYarnClient.
// Fix: assertEquals arguments were reversed (actual, expected); JUnit
// expects (expected, actual), otherwise failure messages are inverted.
@Test(timeout=10000) public void testGetApplications() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  List expectedReports = ((MockYarnClient) client).getReports();
  // No filter: every mock report is returned.
  List reports = client.getApplications();
  Assert.assertEquals(expectedReports, reports);
  // Filter by type only: exactly the YARN and NON-YARN apps, in any order.
  Set appTypes = new HashSet();
  appTypes.add("YARN");
  appTypes.add("NON-YARN");
  reports = client.getApplications(appTypes, null);
  Assert.assertEquals(2, reports.size());
  Assert.assertTrue((reports.get(0).getApplicationType().equals("YARN") && reports.get(1).getApplicationType().equals("NON-YARN")) || (reports.get(1).getApplicationType().equals("YARN") && reports.get(0).getApplicationType().equals("NON-YARN")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  // Filter by state only: the FINISHED and FAILED apps, in any order.
  EnumSet appStates = EnumSet.noneOf(YarnApplicationState.class);
  appStates.add(YarnApplicationState.FINISHED);
  appStates.add(YarnApplicationState.FAILED);
  reports = client.getApplications(null, appStates);
  Assert.assertEquals(2, reports.size());
  Assert.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN") && reports.get(1).getApplicationType().equals("NON-MAPREDUCE")) || (reports.get(1).getApplicationType().equals("NON-YARN") && reports.get(0).getApplicationType().equals("NON-MAPREDUCE")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  // Both filters: only the single NON-YARN app satisfies type AND state.
  reports = client.getApplications(appTypes, appStates);
  Assert.assertEquals(1, reports.size());
  Assert.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN")));
  for (ApplicationReport report : reports) {
    Assert.assertTrue(expectedReports.contains(report));
  }
  client.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that in a secure cluster with the timeline service enabled,
// YarnClientImpl#submitApplication automatically fetches a timeline
// delegation token and injects it into the AM container's credentials.
@Test public void testAutomaticTimelineDelegationTokenLoading() throws Exception {
Configuration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,true);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf);
// The token the mocked TimelineClient hands out; expected to end up in
// the submitted container launch context.
final Token dToken=new Token();
// Anonymous subclass that stubs out every external dependency: the
// timeline client, the RM protocol, the report lookup, and security.
YarnClientImpl client=new YarnClientImpl(){
@Override protected void serviceInit( Configuration conf) throws Exception {
if (getConfig().getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
timelineServiceEnabled=true;
timelineClient=mock(TimelineClient.class);
when(timelineClient.getDelegationToken(any(String.class))).thenReturn(dToken);
timelineClient.init(getConfig());
timelineService=TimelineUtils.buildTimelineTokenService(getConfig());
}
this.setConfig(conf);
}
@Override protected void serviceStart() throws Exception {
rmClient=mock(ApplicationClientProtocol.class);
}
@Override protected void serviceStop() throws Exception {
}
@Override public ApplicationReport getApplicationReport( ApplicationId appId){
// SUBMITTED lets submitApplication's wait loop complete immediately.
ApplicationReport report=mock(ApplicationReport.class);
when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.SUBMITTED);
return report;
}
@Override public boolean isSecurityEnabled(){
// Force the secure path so the delegation token is actually fetched.
return true;
}
}
;
client.init(conf);
client.start();
// Submit a context whose AM container starts with EMPTY credentials.
ApplicationSubmissionContext context=mock(ApplicationSubmissionContext.class);
ApplicationId applicationId=ApplicationId.newInstance(0,1);
when(context.getApplicationId()).thenReturn(applicationId);
DataOutputBuffer dob=new DataOutputBuffer();
Credentials credentials=new Credentials();
credentials.writeTokenStorageToStream(dob);
ByteBuffer tokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
ContainerLaunchContext clc=ContainerLaunchContext.newInstance(null,null,null,null,tokens,null);
when(context.getAMContainerSpec()).thenReturn(clc);
client.submitApplication(context);
// Read back the credentials from the launch context after submission.
credentials=new Credentials();
DataInputByteBuffer dibb=new DataInputByteBuffer();
tokens=clc.getTokens();
if (tokens != null) {
dibb.reset(tokens);
credentials.readTokenStorageStream(dibb);
tokens.rewind();
}
// Exactly the mocked delegation token must have been added.
Collection> dTokens=credentials.getAllTokens();
Assert.assertEquals(1,dTokens.size());
Assert.assertEquals(dToken,dTokens.iterator().next());
client.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that an over-long application type is truncated on submission:
// the 21-character input is cut down to 20 characters.
@Test(timeout=30000) public void testApplicationTypeLimit() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
MockRM rm=new MockRM();
rm.start();
RMApp app1=rm.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE-LENGTH-IS-20");
// Stored type keeps only the first 20 characters of the input.
Assert.assertEquals("MAPREDUCE-LENGTH-IS-",app1.getApplicationType());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies YarnClient#getApplicationAttemptReport for a single attempt
// against the fixed report set exposed by MockYarnClient.
// Fix: assertEquals arguments were reversed (actual, expected); JUnit
// expects (expected, actual), otherwise failure messages are inverted.
@Test(timeout=10000) public void testGetApplicationAttempt() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  List expectedReports = ((MockYarnClient) client).getReports();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  ApplicationAttemptReport report = client.getApplicationAttemptReport(appAttemptId);
  Assert.assertNotNull(report);
  // The returned attempt id must match the first mock report's current attempt.
  Assert.assertEquals(expectedReports.get(0).getCurrentApplicationAttemptId().toString(),
      report.getApplicationAttemptId().toString());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies YarnClient#getContainerReport for a single container against
// the fixed report set exposed by MockYarnClient.
// Fix: assertEquals arguments were reversed (actual, expected); JUnit
// expects (expected, actual), otherwise failure messages are inverted.
@Test(timeout=10000) public void testGetContainerReport() throws YarnException, IOException {
  Configuration conf = new Configuration();
  final YarnClient client = new MockYarnClient();
  client.init(conf);
  client.start();
  List expectedReports = ((MockYarnClient) client).getReports();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
  ContainerReport report = client.getContainerReport(containerId);
  Assert.assertNotNull(report);
  // The returned container id must be container 1 of the first mock
  // report's current attempt.
  Assert.assertEquals(
      ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(), 1).toString(),
      report.getContainerId().toString());
  client.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Running the logs CLI with no arguments must fail (-1) and print the full
// usage/help text; the expected text is rebuilt here line by line and
// compared against the captured stdout.
@Test(timeout=5000l) public void testHelpMessage() throws Exception {
Configuration conf=new YarnConfiguration();
YarnClient mockYarnClient=createMockYarnClient(YarnApplicationState.FINISHED);
LogsCLI dumper=new LogsCLIForTest(mockYarnClient);
dumper.setConf(conf);
// No arguments: the CLI must report failure.
int exitCode=dumper.run(new String[]{});
assertTrue(exitCode == -1);
// Build the expected help output exactly as the CLI prints it.
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Retrieve logs for completed YARN applications.");
pw.println("usage: yarn logs -applicationId [OPTIONS]");
pw.println();
pw.println("general options are:");
pw.println(" -appOwner AppOwner (assumed to be current user if");
pw.println(" not specified)");
pw.println(" -containerId ContainerId (must be specified if node");
pw.println(" address is specified)");
pw.println(" -nodeAddress NodeAddress in the format nodename:port");
pw.println(" (must be specified if container id is");
pw.println(" specified)");
pw.close();
String appReportStr=baos.toString("UTF-8");
// Captured stdout must match the expected help text byte for byte.
Assert.assertEquals(appReportStr,sysOutStream.toString());
}
InternalCallVerifier BooleanVerifier
/**
 * Dumping logs for a nonexistent application must surface a non-zero exit
 * code, both through the {@code LogsCLI} entry point and through
 * {@code LogCLIHelpers} called directly.
 */
@Test(timeout = 5000L)
public void testFailResultCodes() throws Exception {
  Configuration config = new YarnConfiguration();
  config.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class);
  LogCLIHelpers helpers = new LogCLIHelpers();
  helpers.setConf(config);
  YarnClient yarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI logDumper = new LogsCLIForTest(yarnClient);
  logDumper.setConf(config);
  int rc = logDumper.run(new String[]{"-applicationId", "application_0_0"});
  assertTrue("Should return an error code", rc != 0);
  rc = helpers.dumpAContainersLogs(
      "application_0_0", "container_0_0", "nonexistentnode:1234", "nobody");
  assertTrue("Should return an error code", rc != 0);
}
InternalCallVerifier BooleanVerifier
/**
 * A malformed application id must make the logs CLI exit with -1 and print
 * an "Invalid ApplicationId" diagnostic on stderr.
 */
@Test(timeout = 5000L)
public void testInvalidApplicationId() throws Exception {
  Configuration config = new YarnConfiguration();
  YarnClient yarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI logsCli = new LogsCLIForTest(yarnClient);
  logsCli.setConf(config);
  int rc = logsCli.run(new String[]{"-applicationId", "not_an_app_id"});
  assertTrue(rc == -1);
  assertTrue(sysErrStream.toString().startsWith("Invalid ApplicationId specified"));
}
InternalCallVerifier BooleanVerifier
/**
 * A syntactically valid but unknown application id must produce a non-zero
 * exit code and an "Unable to get ApplicationState" message on stderr.
 */
@Test(timeout = 5000L)
public void testUnknownApplicationId() throws Exception {
  Configuration config = new YarnConfiguration();
  YarnClient yarnClient = createMockYarnClientUnknownApp();
  LogsCLI logsCli = new LogsCLIForTest(yarnClient);
  logsCli.setConf(config);
  String unknownId = ApplicationId.newInstance(1, 1).toString();
  int rc = logsCli.run(new String[]{"-applicationId", unknownId});
  assertTrue(rc != 0);
  assertTrue(sysErrStream.toString().startsWith("Unable to get ApplicationState"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "applicationattempt -list appId" should fetch the attempts from the
 * client and print one table row per attempt, matching the expected text
 * exactly.
 */
@Test
public void testGetApplicationAttempts() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ApplicationAttemptId attemptId1 =
      ApplicationAttemptId.newInstance(applicationId, 2);
  ApplicationAttemptReport attemptReport = ApplicationAttemptReport.newInstance(
      attemptId, "host", 124, "url", "diagnostics",
      YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(attemptId, 1));
  ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport.newInstance(
      attemptId1, "host", 124, "url", "diagnostics",
      YarnApplicationAttemptState.FINISHED, ContainerId.newInstance(attemptId1, 1));
  // Parameterized collection instead of a raw List: keeps the stub type-safe
  // and removes unchecked warnings.
  List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>();
  reports.add(attemptReport);
  reports.add(attemptReport1);
  when(client.getApplicationAttempts(any(ApplicationId.class))).thenReturn(reports);
  int result = cli.run(
      new String[]{"applicationattempt", "-list", applicationId.toString()});
  assertEquals(0, result);
  verify(client).getApplicationAttempts(applicationId);
  // Rebuild the expected table output and compare it byte-for-byte.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Total number of application attempts :2");
  pw.print(" ApplicationAttempt-Id");
  pw.print("\t State");
  pw.print("\t AM-Container-Id");
  pw.println("\t Tracking-URL");
  pw.print(" appattempt_1234_0005_000001");
  pw.print("\t FINISHED");
  pw.print("\t container_1234_0005_01_000001");
  pw.println("\t url");
  pw.print(" appattempt_1234_0005_000002");
  pw.print("\t FINISHED");
  pw.print("\t container_1234_0005_02_000001");
  pw.println("\t url");
  pw.close();
  String appReportStr = baos.toString("UTF-8");
  Assert.assertEquals(appReportStr, sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "container -list attemptId" should fetch the containers for the attempt
 * from the client and print one table row per container, matching the
 * expected text exactly.
 */
@Test
public void testGetContainers() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(applicationId, 1);
  ContainerId containerId = ContainerId.newInstance(attemptId, 1);
  ContainerId containerId1 = ContainerId.newInstance(attemptId, 2);
  ContainerReport container = ContainerReport.newInstance(
      containerId, null, NodeId.newInstance("host", 1234), Priority.UNDEFINED,
      1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
  ContainerReport container1 = ContainerReport.newInstance(
      containerId1, null, NodeId.newInstance("host", 1234), Priority.UNDEFINED,
      1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
  // Parameterized collection instead of a raw List: keeps the stub type-safe
  // and removes unchecked warnings.
  List<ContainerReport> reports = new ArrayList<ContainerReport>();
  reports.add(container);
  reports.add(container1);
  when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(reports);
  int result = cli.run(new String[]{"container", "-list", attemptId.toString()});
  assertEquals(0, result);
  verify(client).getContainers(attemptId);
  Log.info(sysOutStream.toString());
  // Rebuild the expected table output and compare it byte-for-byte.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Total number of containers :2");
  pw.print(" Container-Id");
  pw.print("\t Start Time");
  pw.print("\t Finish Time");
  pw.print("\t State");
  pw.print("\t Host");
  pw.println("\t LOG-URL");
  pw.print(" container_1234_0005_01_000001");
  pw.print("\t 1234");
  pw.print("\t 5678");
  pw.print("\t COMPLETE");
  pw.print("\t host:1234");
  pw.println("\t logURL");
  pw.print(" container_1234_0005_01_000002");
  pw.print("\t 1234");
  pw.print("\t 5678");
  pw.print("\t COMPLETE");
  pw.print("\t host:1234");
  pw.println("\t logURL");
  pw.close();
  String appReportStr = baos.toString("UTF-8");
  Assert.assertEquals(appReportStr, sysOutStream.toString());
}
InternalCallVerifier EqualityVerifier
/**
 * Each sub-command run with "-status" but no argument must exit -1 and print
 * "Missing argument for options" followed by that sub-command's help text.
 *
 * Fixed: every Assert.assertEquals call had actual and expected reversed
 * (assertEquals(result, -1)); JUnit expects the expected value first so
 * failure messages read correctly.
 */
@Test
public void testMissingArguments() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  int result = cli.run(new String[]{"application", "-status"});
  Assert.assertEquals(-1, result);
  Assert.assertEquals(
      String.format("Missing argument for options%n%1s",
          createApplicationCLIHelpMessage()),
      sysOutStream.toString());
  sysOutStream.reset();
  result = cli.run(new String[]{"applicationattempt", "-status"});
  Assert.assertEquals(-1, result);
  Assert.assertEquals(
      String.format("Missing argument for options%n%1s",
          createApplicationAttemptCLIHelpMessage()),
      sysOutStream.toString());
  sysOutStream.reset();
  result = cli.run(new String[]{"container", "-status"});
  Assert.assertEquals(-1, result);
  Assert.assertEquals(
      String.format("Missing argument for options%n%1s",
          createContainerCLIHelpMessage()),
      sysOutStream.toString());
  sysOutStream.reset();
  // The node CLI has its own entry point; wire it to the same mocks.
  NodeCLI nodeCLI = new NodeCLI();
  nodeCLI.setClient(client);
  nodeCLI.setSysOutPrintStream(sysOut);
  nodeCLI.setSysErrPrintStream(sysErr);
  result = nodeCLI.run(new String[]{"-status"});
  Assert.assertEquals(-1, result);
  Assert.assertEquals(
      String.format("Missing argument for options%n%1s",
          createNodeCLIHelpMessage()),
      sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// "application -help" and malformed "application" invocations should print
// the application help text.
// NOTE(review): after the first run, the test invokes cli (the un-spied
// instance) but verifies spyCli — those later verify() calls are satisfied
// by the FIRST invocation, so they do not actually check the later runs.
// The later run() results are also never asserted. Confirm intent before
// changing.
@Test(timeout=10000) public void testAppsHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
int result=spyCli.run(new String[]{"application","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
// Extra trailing "args" token makes the command malformed → help printed.
result=cli.run(new String[]{"application","-kill",applicationId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
NodeId nodeId=NodeId.newInstance("host0",0);
result=cli.run(new String[]{"application","-status",nodeId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
}
UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
// "application -kill" behavior in three scenarios:
//  1. already-FINISHED app: killApplication must NOT be called;
//  2. RUNNING app: killApplication must be called once;
//  3. unknown app (ApplicationNotFoundException from the RM): the CLI must
//     report the error itself rather than let the exception escape.
@Test public void testKillApplication() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
// Scenario 1: report says FINISHED, so the kill is skipped.
ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2);
int result=cli.run(new String[]{"application","-kill",applicationId.toString()});
assertEquals(0,result);
verify(client,times(0)).killApplication(any(ApplicationId.class));
verify(sysOut).println("Application " + applicationId + " has already finished ");
// Scenario 2: report says RUNNING, so the kill goes through.
ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport);
result=cli.run(new String[]{"application","-kill",applicationId.toString()});
assertEquals(0,result);
verify(client).killApplication(any(ApplicationId.class));
verify(sysOut).println("Killing application application_1234_0005");
// Scenario 3: the RM lookup throws; the CLI must swallow it and print the
// message. A fresh CLI is created so state from the earlier runs is reset.
doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).getApplicationReport(applicationId);
cli=createAndGetAppCLI();
try {
int exitCode=cli.run(new String[]{"application","-kill",applicationId.toString()});
verify(sysOut).println("Application with id '" + applicationId + "' doesn't exist in RM.");
// NOTE(review): assertNotSame compares references; it only works here
// because autoboxed small Integers are cached. assertNotEquals would state
// the intent directly — confirm JUnit version before changing.
Assert.assertNotSame("should return non-zero exit code.",0,exitCode);
}
catch ( ApplicationNotFoundException appEx) {
Assert.fail("application -kill should not throw" + "ApplicationNotFoundException. " + appEx);
}
catch ( Exception e) {
Assert.fail("Unexpected exception: " + e);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// "container -help" and malformed "container" invocations should print the
// container help text.
// NOTE(review): like testAppsHelpCommand, the later runs use cli (not
// spyCli), so the later verify(spyCli) calls are satisfied by the first
// invocation only, and the later run() results are never asserted. Confirm
// intent before changing.
@Test(timeout=10000) public void testContainersHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
int result=spyCli.run(new String[]{"container","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
// Extra trailing "args" token makes the command malformed → help printed.
result=cli.run(new String[]{"container","-list",appAttemptId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ContainerId containerId=ContainerId.newInstance(appAttemptId,7);
result=cli.run(new String[]{"container","-status",containerId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
}
InternalCallVerifier EqualityVerifier
/**
 * Asking for the status of a node id that is absent from the cluster's node
 * reports should still exit 0, but print a single "not found" line.
 */
@Test
public void testAbsentNodeStatus() throws Exception {
  NodeId missingNode = NodeId.newInstance("Absenthost0", 0);
  NodeCLI nodeCli = new NodeCLI();
  // Zero RUNNING nodes → the requested id can never be found.
  when(client.getNodeReports()).thenReturn(getNodeReports(0, NodeState.RUNNING));
  nodeCli.setClient(client);
  nodeCli.setSysOutPrintStream(sysOut);
  nodeCli.setSysErrPrintStream(sysErr);
  int rc = nodeCli.run(new String[]{"-status", missingNode.toString()});
  assertEquals(0, rc);
  verify(client).getNodeReports();
  // Exactly one line printed: the "not found" diagnostic.
  verify(sysOut, times(1)).println(isA(String.class));
  verify(sysOut).println(
      "Could not find the node report for node id : " + missingNode.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// "node -list" filtering: for each --states selection (and --all / default),
// the CLI must request exactly that state set from the client and print one
// table row per matching node. The cumulative times(n) counters on
// sysOut.write() track one write per CLI run across the whole method, so
// statement order matters here.
// NOTE(review): nodeReports is a raw List; List<NodeReport> would be the
// type-safe spelling — confirm getNodeReports' signatures before changing.
@Test public void testListClusterNodes() throws Exception {
List nodeReports=new ArrayList();
nodeReports.addAll(getNodeReports(1,NodeState.NEW));
nodeReports.addAll(getNodeReports(2,NodeState.RUNNING));
nodeReports.addAll(getNodeReports(1,NodeState.UNHEALTHY));
nodeReports.addAll(getNodeReports(1,NodeState.DECOMMISSIONED));
nodeReports.addAll(getNodeReports(1,NodeState.REBOOTED));
nodeReports.addAll(getNodeReports(1,NodeState.LOST));
NodeCLI cli=new NodeCLI();
cli.setClient(client);
cli.setSysOutPrintStream(sysOut);
// Scenario 1: --states NEW → the single NEW node.
Set nodeStates=new HashSet();
nodeStates.add(NodeState.NEW);
NodeState[] states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
int result=cli.run(new String[]{"-list","--states","NEW"});
assertEquals(0,result);
verify(client).getNodeReports(states);
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t NEW\t host1:8888\t");
pw.println(" 0");
pw.close();
String nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(1)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 2: --states RUNNING → both RUNNING nodes.
nodeStates.clear();
nodeStates.add(NodeState.RUNNING);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","RUNNING"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:2");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t RUNNING\t host1:8888\t");
pw.println(" 0");
pw.print(" host1:0\t RUNNING\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(2)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 3: no --states flag defaults to RUNNING → same output as above.
result=cli.run(new String[]{"-list"});
assertEquals(0,result);
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(3)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 4: --states UNHEALTHY.
nodeStates.clear();
nodeStates.add(NodeState.UNHEALTHY);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","UNHEALTHY"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t UNHEALTHY\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 5: --states DECOMMISSIONED.
nodeStates.clear();
nodeStates.add(NodeState.DECOMMISSIONED);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","DECOMMISSIONED"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(5)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 6: --states REBOOTED.
nodeStates.clear();
nodeStates.add(NodeState.REBOOTED);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","REBOOTED"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t REBOOTED\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(6)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 7: --states LOST.
nodeStates.clear();
nodeStates.add(NodeState.LOST);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","LOST"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:1");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t LOST\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(7)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 8: a comma-separated multi-state filter.
nodeStates.clear();
nodeStates.add(NodeState.NEW);
nodeStates.add(NodeState.RUNNING);
nodeStates.add(NodeState.LOST);
nodeStates.add(NodeState.REBOOTED);
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--states","NEW,RUNNING,LOST,REBOOTED"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:5");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t NEW\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t RUNNING\t host1:8888\t");
pw.println(" 0");
pw.print(" host1:0\t RUNNING\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t REBOOTED\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t LOST\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(8)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
// Scenario 9: --all selects every NodeState and reports all 7 nodes.
nodeStates.clear();
for ( NodeState s : NodeState.values()) {
nodeStates.add(s);
}
states=nodeStates.toArray(new NodeState[0]);
when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
result=cli.run(new String[]{"-list","--all"});
assertEquals(0,result);
verify(client).getNodeReports(states);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total Nodes:7");
pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
pw.println("Number-of-Running-Containers");
pw.print(" host0:0\t NEW\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t RUNNING\t host1:8888\t");
pw.println(" 0");
pw.print(" host1:0\t RUNNING\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t UNHEALTHY\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t REBOOTED\t host1:8888\t");
pw.println(" 0");
pw.print(" host0:0\t LOST\t host1:8888\t");
pw.println(" 0");
pw.close();
nodesReportStr=baos.toString("UTF-8");
Assert.assertEquals(nodesReportStr,sysOutStream.toString());
verify(sysOut,times(9)).write(any(byte[].class),anyInt(),anyInt());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An {@code ApplicationNotFoundException} thrown by
 * {@code getApplicationReport} must propagate out of
 * "application -status" with its message intact.
 */
@Test
public void testGetApplicationReportException() throws Exception {
  ApplicationCLI appCli = createAndGetAppCLI();
  ApplicationId appId = ApplicationId.newInstance(1234, 5);
  when(client.getApplicationReport(any(ApplicationId.class)))
      .thenThrow(new ApplicationNotFoundException(
          "History file for application" + appId + " is not found"));
  try {
    appCli.run(new String[]{"application", "-status", appId.toString()});
    Assert.fail();
  } catch (Exception thrown) {
    // The exact exception type and message must survive unchanged.
    Assert.assertTrue(thrown instanceof ApplicationNotFoundException);
    Assert.assertEquals(
        "History file for application" + appId + " is not found",
        thrown.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// "applicationattempt -help" and malformed "applicationattempt" invocations
// should print the attempt help text.
// NOTE(review): like testAppsHelpCommand, the later runs use cli (not
// spyCli), so the later verify(spyCli) calls are satisfied by the first
// invocation only, and the later run() results are never asserted. Confirm
// intent before changing.
@Test(timeout=10000) public void testAppAttemptsHelpCommand() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationCLI spyCli=spy(cli);
int result=spyCli.run(new String[]{"applicationattempt","-help"});
Assert.assertTrue(result == 0);
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
// Extra trailing "args" token makes the command malformed → help printed.
result=cli.run(new String[]{"applicationattempt","-list",applicationId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
sysOutStream.reset();
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
result=cli.run(new String[]{"applicationattempt","-status",appAttemptId.toString(),"args"});
verify(spyCli).printUsage(any(String.class),any(Options.class));
Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * "applicationattempt -status attemptId" should fetch the attempt report
 * from the client and render it as the fixed multi-line block below.
 */
@Test
public void testGetApplicationAttemptReport() throws Exception {
  ApplicationCLI appCli = createAndGetAppCLI();
  ApplicationId appId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId firstAttempt = ApplicationAttemptId.newInstance(appId, 1);
  ApplicationAttemptReport stubbedReport = ApplicationAttemptReport.newInstance(
      firstAttempt, "host", 124, "url", "diagnostics",
      YarnApplicationAttemptState.FINISHED,
      ContainerId.newInstance(firstAttempt, 1));
  when(client.getApplicationAttemptReport(any(ApplicationAttemptId.class)))
      .thenReturn(stubbedReport);
  int rc = appCli.run(
      new String[]{"applicationattempt", "-status", firstAttempt.toString()});
  assertEquals(0, rc);
  verify(client).getApplicationAttemptReport(firstAttempt);
  // Rebuild the expected report text exactly as the CLI renders it.
  ByteArrayOutputStream expectedBytes = new ByteArrayOutputStream();
  PrintWriter writer = new PrintWriter(expectedBytes);
  writer.println("Application Attempt Report : ");
  writer.println("\tApplicationAttempt-Id : appattempt_1234_0005_000001");
  writer.println("\tState : FINISHED");
  writer.println("\tAMContainer : container_1234_0005_01_000001");
  writer.println("\tTracking-URL : url");
  writer.println("\tRPC Port : 124");
  writer.println("\tAM Host : host");
  writer.println("\tDiagnostics : diagnostics");
  writer.close();
  String expectedReportText = expectedBytes.toString("UTF-8");
  Assert.assertEquals(expectedReportText, sysOutStream.toString());
  // The whole report goes out as a single println.
  verify(sysOut, times(1)).println(isA(String.class));
}
InternalCallVerifier EqualityVerifier
/**
 * "node -status host0:0" against a cluster of three RUNNING nodes should
 * print the full node report for the matching node in a single println.
 */
@Test
public void testNodeStatus() throws Exception {
  NodeId targetNode = NodeId.newInstance("host0", 0);
  NodeCLI nodeCli = new NodeCLI();
  when(client.getNodeReports()).thenReturn(getNodeReports(3, NodeState.RUNNING));
  nodeCli.setClient(client);
  nodeCli.setSysOutPrintStream(sysOut);
  nodeCli.setSysErrPrintStream(sysErr);
  int rc = nodeCli.run(new String[]{"-status", targetNode.toString()});
  assertEquals(0, rc);
  verify(client).getNodeReports();
  // Rebuild the expected report text exactly as NodeCLI renders it.
  ByteArrayOutputStream expectedBytes = new ByteArrayOutputStream();
  PrintWriter writer = new PrintWriter(expectedBytes);
  writer.println("Node Report : ");
  writer.println("\tNode-Id : host0:0");
  writer.println("\tRack : rack1");
  writer.println("\tNode-State : RUNNING");
  writer.println("\tNode-Http-Address : host1:8888");
  writer.println("\tLast-Health-Update : "
      + DateFormatUtils.format(new Date(0), "E dd/MMM/yy hh:mm:ss:SSzz"));
  writer.println("\tHealth-Report : ");
  writer.println("\tContainers : 0");
  writer.println("\tMemory-Used : 0MB");
  writer.println("\tMemory-Capacity : 0MB");
  writer.println("\tCPU-Used : 0 vcores");
  writer.println("\tCPU-Capacity : 0 vcores");
  writer.close();
  String expectedStatus = expectedBytes.toString("UTF-8");
  // The whole report goes out as a single println.
  verify(sysOut, times(1)).println(isA(String.class));
  verify(sysOut).println(expectedStatus);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGetApplications() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
List applicationReports=new ArrayList();
applicationReports.add(newApplicationReport);
ApplicationId applicationId2=ApplicationId.newInstance(1234,6);
ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId2,ApplicationAttemptId.newInstance(applicationId2,2),"user2","queue2","appname2","host2",125,null,YarnApplicationState.FINISHED,"diagnostics2","url2",2,2,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.63789f,"NON-YARN",null);
applicationReports.add(newApplicationReport2);
ApplicationId applicationId3=ApplicationId.newInstance(1234,7);
ApplicationReport newApplicationReport3=ApplicationReport.newInstance(applicationId3,ApplicationAttemptId.newInstance(applicationId3,3),"user3","queue3","appname3","host3",126,null,YarnApplicationState.RUNNING,"diagnostics3","url3",3,3,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.73789f,"MAPREDUCE",null);
applicationReports.add(newApplicationReport3);
ApplicationId applicationId4=ApplicationId.newInstance(1234,8);
ApplicationReport newApplicationReport4=ApplicationReport.newInstance(applicationId4,ApplicationAttemptId.newInstance(applicationId4,4),"user4","queue4","appname4","host4",127,null,YarnApplicationState.FAILED,"diagnostics4","url4",4,4,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.83789f,"NON-MAPREDUCE",null);
applicationReports.add(newApplicationReport4);
ApplicationId applicationId5=ApplicationId.newInstance(1234,9);
ApplicationReport newApplicationReport5=ApplicationReport.newInstance(applicationId5,ApplicationAttemptId.newInstance(applicationId5,5),"user5","queue5","appname5","host5",128,null,YarnApplicationState.ACCEPTED,"diagnostics5","url5",5,5,FinalApplicationStatus.KILLED,null,"N/A",0.93789f,"HIVE",null);
applicationReports.add(newApplicationReport5);
ApplicationId applicationId6=ApplicationId.newInstance(1234,10);
ApplicationReport newApplicationReport6=ApplicationReport.newInstance(applicationId6,ApplicationAttemptId.newInstance(applicationId6,6),"user6","queue6","appname6","host6",129,null,YarnApplicationState.SUBMITTED,"diagnostics6","url6",6,6,FinalApplicationStatus.KILLED,null,"N/A",0.99789f,"PIG",null);
applicationReports.add(newApplicationReport6);
Set appType1=new HashSet();
EnumSet appState1=EnumSet.noneOf(YarnApplicationState.class);
appState1.add(YarnApplicationState.RUNNING);
appState1.add(YarnApplicationState.ACCEPTED);
appState1.add(YarnApplicationState.SUBMITTED);
when(client.getApplications(appType1,appState1)).thenReturn(getApplicationReports(applicationReports,appType1,appState1,false));
int result=cli.run(new String[]{"application","-list"});
assertEquals(0,result);
verify(client).getApplications(appType1,appState1);
ByteArrayOutputStream baos=new ByteArrayOutputStream();
PrintWriter pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType1 + " and states: "+ appState1+ ")"+ ":"+ 4);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t RUNNING\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.println("\t N/A");
pw.print(" application_1234_0007\t ");
pw.print("appname3\t MAPREDUCE\t user3\t ");
pw.print("queue3\t RUNNING\t ");
pw.print("SUCCEEDED\t 73.79%");
pw.println("\t N/A");
pw.print(" application_1234_0009\t ");
pw.print("appname5\t HIVE\t user5\t ");
pw.print("queue5\t ACCEPTED\t ");
pw.print("KILLED\t 93.79%");
pw.println("\t N/A");
pw.print(" application_1234_0010\t ");
pw.print("appname6\t PIG\t user6\t ");
pw.print("queue6\t SUBMITTED\t ");
pw.print("KILLED\t 99.79%");
pw.println("\t N/A");
pw.close();
String appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(1)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
Set appType2=new HashSet();
appType2.add("YARN");
appType2.add("NON-YARN");
EnumSet appState2=EnumSet.noneOf(YarnApplicationState.class);
appState2.add(YarnApplicationState.RUNNING);
appState2.add(YarnApplicationState.ACCEPTED);
appState2.add(YarnApplicationState.SUBMITTED);
when(client.getApplications(appType2,appState2)).thenReturn(getApplicationReports(applicationReports,appType2,appState2,false));
result=cli.run(new String[]{"application","-list","-appTypes","YARN, ,, NON-YARN"," ,, ,,"});
assertEquals(0,result);
verify(client).getApplications(appType2,appState2);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType2 + " and states: "+ appState2+ ")"+ ":"+ 1);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t RUNNING\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.println("\t N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(2)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
Set appType3=new HashSet();
EnumSet appState3=EnumSet.noneOf(YarnApplicationState.class);
appState3.add(YarnApplicationState.FINISHED);
appState3.add(YarnApplicationState.FAILED);
when(client.getApplications(appType3,appState3)).thenReturn(getApplicationReports(applicationReports,appType3,appState3,false));
result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , FAILED",",,FINISHED"});
assertEquals(0,result);
verify(client).getApplications(appType3,appState3);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType3 + " and states: "+ appState3+ ")"+ ":"+ 2);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
pw.print("SUCCEEDED\t 63.79%");
pw.println("\t N/A");
pw.print(" application_1234_0008\t ");
pw.print("appname4\t NON-MAPREDUCE\t user4\t ");
pw.print("queue4\t FAILED\t ");
pw.print("SUCCEEDED\t 83.79%");
pw.println("\t N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(3)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
Set appType4=new HashSet();
appType4.add("YARN");
appType4.add("NON-YARN");
EnumSet appState4=EnumSet.noneOf(YarnApplicationState.class);
appState4.add(YarnApplicationState.FINISHED);
appState4.add(YarnApplicationState.FAILED);
when(client.getApplications(appType4,appState4)).thenReturn(getApplicationReports(applicationReports,appType4,appState4,false));
result=cli.run(new String[]{"application","-list","--appTypes","YARN,NON-YARN","--appStates","FINISHED ,, , FAILED"});
assertEquals(0,result);
verify(client).getApplications(appType2,appState2);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType4 + " and states: "+ appState4+ ")"+ ":"+ 1);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
pw.print("SUCCEEDED\t 63.79%");
pw.println("\t N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , INVALID"});
assertEquals(-1,result);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("The application state INVALID is invalid.");
pw.print("The valid application state can be one of the following: ");
StringBuilder sb=new StringBuilder();
sb.append("ALL,");
for ( YarnApplicationState state : YarnApplicationState.values()) {
sb.append(state + ",");
}
String output=sb.toString();
pw.println(output.substring(0,output.length() - 1));
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
Set appType5=new HashSet();
EnumSet appState5=EnumSet.noneOf(YarnApplicationState.class);
appState5.add(YarnApplicationState.FINISHED);
when(client.getApplications(appType5,appState5)).thenReturn(getApplicationReports(applicationReports,appType5,appState5,true));
result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , ALL"});
assertEquals(0,result);
verify(client).getApplications(appType5,appState5);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType5 + " and states: "+ appState5+ ")"+ ":"+ 6);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0005\t ");
pw.print("appname\t YARN\t user\t ");
pw.print("queue\t RUNNING\t ");
pw.print("SUCCEEDED\t 53.79%");
pw.println("\t N/A");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
pw.print("SUCCEEDED\t 63.79%");
pw.println("\t N/A");
pw.print(" application_1234_0007\t ");
pw.print("appname3\t MAPREDUCE\t user3\t ");
pw.print("queue3\t RUNNING\t ");
pw.print("SUCCEEDED\t 73.79%");
pw.println("\t N/A");
pw.print(" application_1234_0008\t ");
pw.print("appname4\t NON-MAPREDUCE\t user4\t ");
pw.print("queue4\t FAILED\t ");
pw.print("SUCCEEDED\t 83.79%");
pw.println("\t N/A");
pw.print(" application_1234_0009\t ");
pw.print("appname5\t HIVE\t user5\t ");
pw.print("queue5\t ACCEPTED\t ");
pw.print("KILLED\t 93.79%");
pw.println("\t N/A");
pw.print(" application_1234_0010\t ");
pw.print("appname6\t PIG\t user6\t ");
pw.print("queue6\t SUBMITTED\t ");
pw.print("KILLED\t 99.79%");
pw.println("\t N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(5)).write(any(byte[].class),anyInt(),anyInt());
sysOutStream.reset();
Set appType6=new HashSet();
appType6.add("YARN");
appType6.add("NON-YARN");
EnumSet appState6=EnumSet.noneOf(YarnApplicationState.class);
appState6.add(YarnApplicationState.FINISHED);
when(client.getApplications(appType6,appState6)).thenReturn(getApplicationReports(applicationReports,appType6,appState6,false));
result=cli.run(new String[]{"application","-list","-appTypes","YARN, ,, NON-YARN","--appStates","finished"});
assertEquals(0,result);
verify(client).getApplications(appType6,appState6);
baos=new ByteArrayOutputStream();
pw=new PrintWriter(baos);
pw.println("Total number of applications (application-types: " + appType6 + " and states: "+ appState6+ ")"+ ":"+ 1);
pw.print(" Application-Id\t Application-Name");
pw.print("\t Application-Type");
pw.print("\t User\t Queue\t State\t ");
pw.print("Final-State\t Progress");
pw.println("\t Tracking-URL");
pw.print(" application_1234_0006\t ");
pw.print("appname2\t NON-YARN\t user2\t ");
pw.print("queue2\t FINISHED\t ");
pw.print("SUCCEEDED\t 63.79%");
pw.println("\t N/A");
pw.close();
appsReportStr=baos.toString("UTF-8");
Assert.assertEquals(appsReportStr,sysOutStream.toString());
verify(sysOut,times(6)).write(any(byte[].class),anyInt(),anyInt());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that "application -status &lt;id&gt;" prints the full report for a
 * finished application and exits with status 0.
 */
@Test public void testGetApplicationReport() throws Exception {
  ApplicationCLI appCli = createAndGetAppCLI();
  ApplicationId appId = ApplicationId.newInstance(1234, 5);
  ApplicationReport report = ApplicationReport.newInstance(appId,
      ApplicationAttemptId.newInstance(appId, 1), "user", "queue", "appname",
      "host", 124, null, YarnApplicationState.FINISHED, "diagnostics", "url",
      0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN",
      null);
  when(client.getApplicationReport(any(ApplicationId.class)))
      .thenReturn(report);
  int rc = appCli.run(
      new String[]{"application", "-status", appId.toString()});
  assertEquals(0, rc);
  verify(client).getApplicationReport(appId);
  // Build the expected console output with a PrintWriter so the line
  // separators match what the CLI itself emits.
  ByteArrayOutputStream expectedBytes = new ByteArrayOutputStream();
  PrintWriter expected = new PrintWriter(expectedBytes);
  expected.println("Application Report : ");
  expected.println("\tApplication-Id : application_1234_0005");
  expected.println("\tApplication-Name : appname");
  expected.println("\tApplication-Type : YARN");
  expected.println("\tUser : user");
  expected.println("\tQueue : queue");
  expected.println("\tStart-Time : 0");
  expected.println("\tFinish-Time : 0");
  expected.println("\tProgress : 53.79%");
  expected.println("\tState : FINISHED");
  expected.println("\tFinal-State : SUCCEEDED");
  expected.println("\tTracking-URL : N/A");
  expected.println("\tRPC Port : 124");
  expected.println("\tAM Host : host");
  expected.println("\tDiagnostics : diagnostics");
  expected.close();
  String expectedOutput = expectedBytes.toString("UTF-8");
  Assert.assertEquals(expectedOutput, sysOutStream.toString());
  verify(sysOut, times(1)).println(isA(String.class));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that "container -status &lt;id&gt;" prints the full container report
 * and exits with status 0.
 */
@Test public void testGetContainerReport() throws Exception {
  ApplicationCLI appCli = createAndGetAppCLI();
  ApplicationId appId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId attempt = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newInstance(attempt, 1);
  ContainerReport report = ContainerReport.newInstance(containerId, null,
      NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
      "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE);
  when(client.getContainerReport(any(ContainerId.class))).thenReturn(report);
  int rc = appCli.run(
      new String[]{"container", "-status", containerId.toString()});
  assertEquals(0, rc);
  verify(client).getContainerReport(containerId);
  // Build the expected console output with a PrintWriter so the line
  // separators match what the CLI itself emits.
  ByteArrayOutputStream expectedBytes = new ByteArrayOutputStream();
  PrintWriter expected = new PrintWriter(expectedBytes);
  expected.println("Container Report : ");
  expected.println("\tContainer-Id : container_1234_0005_01_000001");
  expected.println("\tStart-Time : 1234");
  expected.println("\tFinish-Time : 5678");
  expected.println("\tState : COMPLETE");
  expected.println("\tLOG-URL : logURL");
  expected.println("\tHost : host:1234");
  expected.println("\tDiagnostics : diagnosticInfo");
  expected.close();
  String expectedOutput = expectedBytes.toString("UTF-8");
  Assert.assertEquals(expectedOutput, sysOutStream.toString());
  verify(sysOut, times(1)).println(isA(String.class));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises "application -movetoqueue" in three scenarios: (1) a FINISHED
// application is rejected before any move RPC is issued, (2) a RUNNING
// application is moved and success is reported, (3) an RM-side
// ApplicationNotFoundException propagates to the caller.
@Test public void testMoveApplicationAcrossQueues() throws Exception {
ApplicationCLI cli=createAndGetAppCLI();
ApplicationId applicationId=ApplicationId.newInstance(1234,5);
// Scenario 1: the report says FINISHED, so the CLI must refuse the move.
ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2);
int result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
assertEquals(0,result);
// The move RPC must never be issued for a finished application.
verify(client,times(0)).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class));
verify(sysOut).println("Application " + applicationId + " has already finished ");
// Scenario 2: the report now says RUNNING, so the move should go through.
ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport);
result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
assertEquals(0,result);
verify(client).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class));
verify(sysOut).println("Moving application application_1234_0005 to queue targetqueue");
verify(sysOut).println("Successfully completed move.");
// Scenario 3: stub the move RPC to throw; the CLI must let it propagate.
doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).moveApplicationAcrossQueues(applicationId,"targetqueue");
cli=createAndGetAppCLI();
try {
result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"});
Assert.fail();
}
catch ( Exception ex) {
Assert.assertTrue(ex instanceof ApplicationNotFoundException);
Assert.assertEquals("Application with id '" + applicationId + "' doesn't exist in RM.",ex.getMessage());
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Drives {@link HAUtil#verifyAndSetConfiguration} through one valid and five
 * invalid HA configurations, asserting the exact error message produced for
 * each misconfiguration.
 *
 * Fix: every expected-exception branch now calls {@code fail(...)} right
 * after the verify call. Previously four of the five branches had no
 * {@code fail()}, so the test silently passed when the expected
 * YarnRuntimeException was never thrown.
 */
@Test public void testVerifyAndSetConfiguration() throws Exception {
  // Valid configuration prepared by the fixture: must not throw.
  try {
    HAUtil.verifyAndSetConfiguration(conf);
  } catch (YarnRuntimeException e) {
    fail("Should not throw any exceptions.");
  }
  assertEquals("Should be saved as Trimmed collection",StringUtils.getStringCollection(RM_NODE_IDS),HAUtil.getRMHAIds(conf));
  assertEquals("Should be saved as Trimmed string",RM1_NODE_ID,HAUtil.getRMHAId(conf));
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    assertEquals("RPC address not set for " + confKey,RM1_ADDRESS,conf.get(confKey));
  }
  // Case: only a single RM id listed -- HA requires at least two RMs.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID);
  try {
    HAUtil.verifyAndSetConfiguration(conf);
    fail("verifyAndSetConfiguration should have thrown for a single RM id");
  } catch (YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by verifyAndSetRMHAIds()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,conf.get(YarnConfiguration.RM_HA_IDS) + "\nHA mode requires atleast two RMs"),e.getMessage());
  }
  // Case: two RM ids with addresses, but RM_HA_ID itself is not set.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS);
    conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS);
  }
  try {
    HAUtil.verifyAndSetConfiguration(conf);
    fail("verifyAndSetConfiguration should have thrown for missing RM_HA_ID");
  } catch (YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by getRMId()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID),e.getMessage());
  }
  // Case: RM_HA_ID is set to an invalid node id.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID);
  conf.set(YarnConfiguration.RM_HA_IDS,RM_INVALID_NODE_ID + "," + RM1_NODE_ID);
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(confKey + RM_INVALID_NODE_ID,RM_INVALID_NODE_ID);
  }
  try {
    HAUtil.verifyAndSetConfiguration(conf);
    fail("verifyAndSetConfiguration should have thrown for an invalid RM_HA_ID");
  } catch (YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by addSuffix()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID),e.getMessage());
  }
  // Case: ids present but no RPC address configured for the current RM.
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID);
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
  try {
    HAUtil.verifyAndSetConfiguration(conf);
    fail("Should throw YarnRuntimeException. by Configuration#set()");
  } catch (YarnRuntimeException e) {
    String confKey=HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,RM1_NODE_ID);
    assertEquals("YarnRuntimeException by Configuration#set()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,RM1_NODE_ID) + " or " + confKey),e.getMessage());
  }
  // Case: RM_HA_ID (untrimmed rm1) is not a member of RM_HA_IDS (rm2, rm3).
  conf.clear();
  conf.set(YarnConfiguration.RM_HA_IDS,RM2_NODE_ID + "," + RM3_NODE_ID);
  conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID_UNTRIMMED);
  for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
    conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS_UNTRIMMED);
    conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS);
    conf.set(HAUtil.addSuffix(confKey,RM3_NODE_ID),RM3_ADDRESS);
  }
  try {
    HAUtil.verifyAndSetConfiguration(conf);
    fail("verifyAndSetConfiguration should have thrown when RM_HA_ID is not in RM_HA_IDS");
  } catch (YarnRuntimeException e) {
    assertEquals("YarnRuntimeException by getRMId()'s validation",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getRMHAIdNeedToBeIncludedMessage("[rm2, rm3]",RM1_NODE_ID),e.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that {@link HAUtil#getRMHAIds} returns the configured RM ids in
 * order. Fix: the local was declared with the raw {@code Collection} type;
 * it is now the parameterized {@code Collection<String>}.
 */
@Test public void testGetRMServiceId() throws Exception {
  conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID);
  Collection<String> rmhaIds=HAUtil.getRMHAIds(conf);
  assertEquals(2,rmhaIds.size());
  // Order matters: ids must come back in the configured order.
  String[] ids=rmhaIds.toArray(new String[0]);
  assertEquals(RM1_NODE_ID,ids[0]);
  assertEquals(RM2_NODE_ID,ids[1]);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getRMHAId returns the configured id verbatim, and null once the
 * configuration is cleared.
 */
@Test public void testGetRMId() throws Exception {
  // Configured id must be honored as-is.
  conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
  assertEquals("Does not honor " + YarnConfiguration.RM_HA_ID,
      RM1_NODE_ID, HAUtil.getRMHAId(conf));
  // With no id configured, nothing can be resolved.
  conf.clear();
  assertNull("Return null when " + YarnConfiguration.RM_HA_ID + " is not set",
      HAUtil.getRMHAId(conf));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With RM HA enabled, the NM address lookup must still resolve the plain
 * (non-suffixed) NM_ADDRESS key.
 */
@Test public void testGetSocketAddressForNMWithHA(){
  YarnConfiguration config = new YarnConfiguration();
  config.set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:1234");
  // Turn HA on and pin the current RM id.
  config.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  config.set(YarnConfiguration.RM_HA_ID, "rm1");
  assertTrue(HAUtil.isHAEnabled(config));
  InetSocketAddress nmAddress = config.getSocketAddr(
      YarnConfiguration.NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_PORT);
  assertEquals(1234, nmAddress.getPort());
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * When RM_WEBAPP_ADDRESS is set explicitly, the resolved web URL must use
 * its port and must not be derived from RM_ADDRESS.
 *
 * Fixes: corrected the typo "incrrect" in the failure message, and replaced
 * {@code Integer.valueOf(...).intValue()} with {@code Integer.parseInt} to
 * avoid a pointless boxing round-trip.
 */
@Test public void testRMWebUrlSpecified() throws Exception {
  YarnConfiguration conf=new YarnConfiguration();
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"fortesting:24543");
  conf.set(YarnConfiguration.RM_ADDRESS,"rmtesting:9999");
  String rmWebUrl=WebAppUtils.getRMWebAppURLWithScheme(conf);
  // The port is the last ':'-separated component of the URL.
  String[] parts=rmWebUrl.split(":");
  Assert.assertEquals("RM Web URL Port is incorrect",24543,Integer.parseInt(parts[parts.length - 1]));
  Assert.assertNotSame("RM Web Url not resolved correctly. Should not be rmtesting","http://rmtesting:24543",rmWebUrl);
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies {@link Configuration#getSocketAddr} resolution of the RM
 * resource-tracker address across combinations of RM_BIND_HOST and
 * RM_RESOURCE_TRACKER_ADDRESS (unset, host-only, and host:port forms).
 *
 * Improvement: the identical four-argument getSocketAddr call, previously
 * repeated six times, is extracted into a private helper.
 */
@Test public void testGetSocketAddr() throws Exception {
  YarnConfiguration conf=new YarnConfiguration();
  // Nothing configured: default host and default port.
  assertEquals(new InetSocketAddress(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),resolveResourceTrackerAddr(conf));
  // Tracker address without a port: host honored, default port used.
  conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,"10.0.0.1");
  assertEquals(new InetSocketAddress("10.0.0.1",YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),resolveResourceTrackerAddr(conf));
  // Tracker address with an explicit port: both honored.
  conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,"10.0.0.2:5001");
  assertEquals(new InetSocketAddress("10.0.0.2",5001),resolveResourceTrackerAddr(conf));
  // Only a bind host: bind host plus default port.
  conf=new YarnConfiguration();
  conf.set(YarnConfiguration.RM_BIND_HOST,"10.0.0.3");
  assertEquals(new InetSocketAddress("10.0.0.3",YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),resolveResourceTrackerAddr(conf));
  // Wildcard bind host replaces the tracker host; port stays default.
  conf.set(YarnConfiguration.RM_BIND_HOST,"0.0.0.0");
  conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,"10.0.0.2");
  assertEquals(new InetSocketAddress("0.0.0.0",YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),resolveResourceTrackerAddr(conf));
  // Wildcard bind host with an explicit tracker port keeps that port.
  conf.set(YarnConfiguration.RM_BIND_HOST,"0.0.0.0");
  conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,"10.0.0.2:5003");
  assertEquals(new InetSocketAddress("0.0.0.0",5003),resolveResourceTrackerAddr(conf));
}

/** Resolves the resource-tracker address exactly as each case above did inline. */
private static InetSocketAddress resolveResourceTrackerAddr(YarnConfiguration conf){
  return conf.getSocketAddr(YarnConfiguration.RM_BIND_HOST,YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
}
InternalCallVerifier BooleanVerifier
/**
 * updateConnectAddr must keep the configured hostname only when the bind
 * host is the wildcard address; otherwise the server address wins.
 */
@Test public void testUpdateConnectAddr() throws Exception {
  // Default host/port parsed once from the "host:port" default constant.
  String defaultHost = YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0];
  int defaultPort = Integer.parseInt(
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]);
  // No bind host configured: connect address follows the server address,
  // not the configured hostname.
  YarnConfiguration trackerConf = new YarnConfiguration();
  trackerConf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
  InetSocketAddress server = new InetSocketAddress(defaultHost, defaultPort);
  InetSocketAddress connectAddr = trackerConf.updateConnectAddr(
      YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, server);
  assertFalse(connectAddr.toString().startsWith("yo.yo.yo"));
  // Wildcard bind host: the configured hostname is preserved.
  trackerConf = new YarnConfiguration();
  trackerConf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
  trackerConf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
  server = new InetSocketAddress(defaultHost, defaultPort);
  connectAddr = trackerConf.updateConnectAddr(
      YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS, server);
  assertTrue(connectAddr.toString().startsWith("yo.yo.yo"));
}
InternalCallVerifier BooleanVerifier
// Verifies AggregatedLogDeletionService deletes only logs older than the
// retention period, and that refreshLogRetentionSettings() picks up new
// retention/check-interval values at runtime without a restart.
@Test public void testRefreshLogRetentionSettings() throws IOException {
long now=System.currentTimeMillis();
long before2000Secs=now - (2000 * 1000);
long before50Secs=now - (50 * 1000);
String root="mockfs://foo/";
String remoteRootLogDir=root + "tmp/logs";
String suffix="logs";
final Configuration conf=new Configuration();
// Route the mockfs:// scheme to a mock FileSystem so listStatus/delete
// calls can be stubbed and verified below.
conf.setClass("fs.mockfs.impl",MockFileSystem.class,FileSystem.class);
conf.set(YarnConfiguration.LOG_AGGREGATION_ENABLED,"true");
// Initial settings: 1800s retention, checked every second.
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,"1800");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,"1");
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,remoteRootLogDir);
conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,suffix);
Path rootPath=new Path(root);
FileSystem rootFs=rootPath.getFileSystem(conf);
FileSystem mockFs=((FilterFileSystem)rootFs).getRawFileSystem();
Path remoteRootLogPath=new Path(remoteRootLogDir);
Path userDir=new Path(remoteRootLogPath,"me");
FileStatus userDirStatus=new FileStatus(0,true,0,0,before50Secs,userDir);
when(mockFs.listStatus(remoteRootLogPath)).thenReturn(new FileStatus[]{userDirStatus});
Path userLogDir=new Path(userDir,suffix);
// app1 is 2000s old (beyond the 1800s retention); app2 is only 50s old.
Path app1Dir=new Path(userLogDir,"application_1_1");
FileStatus app1DirStatus=new FileStatus(0,true,0,0,before2000Secs,app1Dir);
Path app2Dir=new Path(userLogDir,"application_1_2");
FileStatus app2DirStatus=new FileStatus(0,true,0,0,before50Secs,app2Dir);
when(mockFs.listStatus(userLogDir)).thenReturn(new FileStatus[]{app1DirStatus,app2DirStatus});
Path app1Log1=new Path(app1Dir,"host1");
FileStatus app1Log1Status=new FileStatus(10,false,1,1,before2000Secs,app1Log1);
when(mockFs.listStatus(app1Dir)).thenReturn(new FileStatus[]{app1Log1Status});
Path app2Log1=new Path(app2Dir,"host1");
FileStatus app2Log1Status=new FileStatus(10,false,1,1,before50Secs,app2Log1);
when(mockFs.listStatus(app2Dir)).thenReturn(new FileStatus[]{app2Log1Status});
// Anonymous subclass so the service re-reads this test's conf on refresh.
AggregatedLogDeletionService deletionSvc=new AggregatedLogDeletionService(){
@Override protected Configuration createConf(){
return conf;
}
}
;
deletionSvc.init(conf);
deletionSvc.start();
// Only the 2000s-old app1 logs should be deleted; app2 must survive.
verify(mockFs,timeout(10000)).delete(app1Dir,true);
verify(mockFs,timeout(3000).times(0)).delete(app2Dir,true);
// Tighten retention to 50s and the check interval to 2s, then refresh.
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,"50");
conf.set(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS,"2");
Assert.assertTrue(2000l != deletionSvc.getCheckIntervalMsecs());
deletionSvc.refreshLogRetentionSettings();
// The refreshed 2s interval must now be in effect...
Assert.assertTrue(2000l == deletionSvc.getCheckIntervalMsecs());
// ...and app2 now exceeds the 50s retention, so it gets deleted too.
verify(mockFs,timeout(10000)).delete(app2Dir,true);
deletionSvc.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter HybridVerifier
/**
 * Writes two container log files owned by the current user, but reports a
 * bogus owner ("randomUser") for the first ownership check via a spied
 * LogValue, then verifies that aggregation records an ownership-mismatch
 * message for that file while the correctly-owned file's contents are
 * aggregated.
 *
 * Fix: the BufferedReader over the aggregated file was never closed; it is
 * now managed with try-with-resources. Also replaced the needlessly
 * synchronized StringBuffer with StringBuilder.
 */
@Test(timeout=10000) public void testContainerLogsFileAccess() throws IOException {
  // Ownership checks need real file owners, which require native IO.
  Assume.assumeTrue(NativeIO.isAvailable());
  Configuration conf=new Configuration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  File workDir=new File(testWorkDir,"testContainerLogsFileAccess1");
  Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile");
  Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles");
  String data="Log File content for container : ";
  ApplicationId applicationId=ApplicationId.newInstance(1,1);
  ApplicationAttemptId applicationAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
  ContainerId testContainerId1=ContainerId.newInstance(applicationAttemptId,1);
  Path appDir=new Path(srcFileRoot,testContainerId1.getApplicationAttemptId().getApplicationId().toString());
  Path srcFilePath1=new Path(appDir,testContainerId1.toString());
  String stdout="stdout";
  String stderr="stderr";
  writeSrcFile(srcFilePath1,stdout,data + testContainerId1.toString() + stdout);
  writeSrcFile(srcFilePath1,stderr,data + testContainerId1.toString() + stderr);
  UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
  LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi);
  LogKey logKey=new LogKey(testContainerId1);
  String randomUser="randomUser";
  LogValue logValue=spy(new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId1,randomUser));
  // First ownership query returns the bogus user, subsequent queries the
  // real owner -- so exactly one file fails the ownership check.
  when(logValue.getUser()).thenReturn(randomUser).thenReturn(ugi.getShortUserName());
  logWriter.append(logKey,logValue);
  logWriter.close();
  // Read the aggregated file back into one string; the reader is closed
  // even if an assertion below throws.
  StringBuilder sb=new StringBuilder();
  String line;
  try (BufferedReader in=new BufferedReader(new FileReader(new File(remoteAppLogFile.toUri().getRawPath())))) {
    while ((line=in.readLine()) != null) {
      LOG.info(line);
      sb.append(line);
    }
  }
  line=sb.toString();
  String expectedOwner=ugi.getShortUserName();
  if (Path.WINDOWS) {
    // On Windows, files created by an Administrators-group member are
    // reported as owned by the group.
    final String adminsGroupString="Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner=adminsGroupString;
    }
  }
  String stdoutFile1=StringUtils.join(File.separator,Arrays.asList(new String[]{workDir.getAbsolutePath(),"srcFiles",testContainerId1.getApplicationAttemptId().getApplicationId().toString(),testContainerId1.toString(),stderr}));
  String message1="Owner '" + expectedOwner + "' for path "+ stdoutFile1+ " did not match expected owner '"+ randomUser+ "'";
  String stdoutFile2=StringUtils.join(File.separator,Arrays.asList(new String[]{workDir.getAbsolutePath(),"srcFiles",testContainerId1.getApplicationAttemptId().getApplicationId().toString(),testContainerId1.toString(),stdout}));
  String message2="Owner '" + expectedOwner + "' for path "+ stdoutFile2+ " did not match expected owner '"+ ugi.getShortUserName()+ "'";
  // The mismatched file must be flagged, the matching one must not be.
  Assert.assertTrue(line.contains(message1));
  Assert.assertFalse(line.contains(message2));
  // Mismatched file's content is skipped; the valid file's is aggregated.
  Assert.assertFalse(line.contains(data + testContainerId1.toString() + stderr));
  Assert.assertTrue(line.contains(data + testContainerId1.toString() + stdout));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Aggregates one 80000-char "stdout" log for a single container, then reads
// it back with LogReader and verifies file permissions, the rendered
// header lines, the content, and the exact total length.
@Test public void testReadAcontainerLogs1() throws Exception {
Configuration conf=new Configuration();
File workDir=new File(testWorkDir,"testReadAcontainerLogs1");
Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile");
Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles");
ContainerId testContainerId=TestContainerId.newContainerId(1,1,1,1);
Path t=new Path(srcFileRoot,testContainerId.getApplicationAttemptId().getApplicationId().toString());
Path srcFilePath=new Path(t,testContainerId.toString());
int numChars=80000;
// Source log: presumably numChars repetitions of 'filler' (matches the
// expectedContent built below) -- see writeSrcFile.
writeSrcFile(srcFilePath,"stdout",numChars);
UserGroupInformation ugi=UserGroupInformation.getCurrentUser();
LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi);
LogKey logKey=new LogKey(testContainerId);
LogValue logValue=new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId,ugi.getShortUserName());
logWriter.append(logKey,logValue);
logWriter.close();
// Aggregated log files must be written with 0640 permissions.
FileStatus fsStatus=fs.getFileStatus(remoteAppLogFile);
Assert.assertEquals("permissions on log aggregation file are wrong",FsPermission.createImmutable((short)0640),fsStatus.getPermission());
// NOTE(review): logReader and dis are never closed; consider a
// try/finally with logReader.close() -- confirm LogReader's close API.
LogReader logReader=new LogReader(conf,remoteAppLogFile);
LogKey rLogKey=new LogKey();
DataInputStream dis=logReader.next(rLogKey);
Writer writer=new StringWriter();
LogReader.readAcontainerLogs(dis,writer);
String s=writer.toString();
// Expected rendered size: the header lines plus the raw content length.
int expectedLength="\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length() + "\nLog Contents:\n".length()+ numChars;
Assert.assertTrue("LogType not matched",s.contains("LogType:stdout"));
Assert.assertTrue("LogLength not matched",s.contains("LogLength:" + numChars));
Assert.assertTrue("Log Contents not matched",s.contains("Log Contents"));
StringBuilder sb=new StringBuilder();
for (int i=0; i < numChars; i++) {
sb.append(filler);
}
String expectedContent=sb.toString();
Assert.assertTrue("Log content incorrect",s.contains(expectedContent));
Assert.assertEquals(expectedLength,s.length());
}
InternalCallVerifier EqualityVerifier
/**
* Make a local and log directory inaccessible during initialization
* and verify those bad directories are recognized and removed from
* the list of available local and log directories.
* @throws IOException
*/
@Test public void testDirFailuresOnStartup() throws IOException {
  Configuration conf=new YarnConfiguration();
  String localDir1=new File(testDir,"localDir1").getPath();
  String localDir2=new File(testDir,"localDir2").getPath();
  String logDir1=new File(testDir,"logDir1").getPath();
  String logDir2=new File(testDir,"logDir2").getPath();
  conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir1 + "," + localDir2);
  conf.set(YarnConfiguration.NM_LOG_DIRS,logDir1 + "," + logDir2);
  // Make one local dir and one log dir unusable before service init.
  prepareDirToFail(localDir1);
  prepareDirToFail(logDir2);
  LocalDirsHandlerService dirSvc=new LocalDirsHandlerService();
  dirSvc.init(conf);
  // Fix: parameterized List<String> locals instead of the raw List type.
  // Only the healthy local dir should survive initialization.
  List<String> localDirs=dirSvc.getLocalDirs();
  Assert.assertEquals(1,localDirs.size());
  Assert.assertEquals(new Path(localDir2).toString(),localDirs.get(0));
  // Likewise only the healthy log dir remains.
  List<String> logDirs=dirSvc.getLogDirs();
  Assert.assertEquals(1,logDirs.size());
  Assert.assertEquals(new Path(logDir1).toString(),logDirs.get(0));
}
TestInitializer InternalCallVerifier BooleanVerifier HybridVerifier
// Starts a MiniYARNCluster for HA tests (the ctor args 2,1,1,1 presumably
// configure two RMs -- confirm against the MiniYARNCluster constructor),
// manually transitions RM 0 to active, and verifies an active RM exists.
@Before public void setup() throws IOException, InterruptedException {
Configuration conf=new YarnConfiguration();
// Automatic failover is off: the test controls which RM becomes active.
conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
// Port 0: let the OS pick a free web port to avoid clashes.
conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"localhost:0");
cluster=new MiniYARNCluster(TestMiniYARNClusterForHA.class.getName(),2,1,1,1);
cluster.init(conf);
cluster.start();
// Explicit user-initiated transition of RM 0 to the active state.
cluster.getResourceManager(0).getRMContext().getRMAdminService().transitionToActive(new HAServiceProtocol.StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER));
assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips a RegisterNodeManagerRequest through its protobuf form and
 * checks every field survives. Fix: the container-status list was declared
 * with the raw {@code List} type; it is now {@code List<NMContainerStatus>}.
 */
@Test public void testRegisterNodeManagerRequest(){
  ApplicationId appId=ApplicationId.newInstance(123456789,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  NMContainerStatus containerReport=NMContainerStatus.newInstance(containerId,ContainerState.RUNNING,Resource.newInstance(1024,1),"diagnostics",0,Priority.newInstance(10),1234);
  List<NMContainerStatus> reports=Arrays.asList(containerReport);
  RegisterNodeManagerRequest request=RegisterNodeManagerRequest.newInstance(NodeId.newInstance("1.1.1.1",1000),8080,Resource.newInstance(1024,1),"NM-version-id",reports,Arrays.asList(appId));
  // Serialize to proto and parse back through the PB implementation.
  RegisterNodeManagerRequest requestProto=new RegisterNodeManagerRequestPBImpl(((RegisterNodeManagerRequestPBImpl)request).getProto());
  Assert.assertEquals(containerReport,requestProto.getNMContainerStatuses().get(0));
  Assert.assertEquals(8080,requestProto.getHttpPort());
  Assert.assertEquals("NM-version-id",requestProto.getNMVersion());
  Assert.assertEquals(NodeId.newInstance("1.1.1.1",1000),requestProto.getNodeId());
  Assert.assertEquals(Resource.newInstance(1024,1),requestProto.getResource());
  Assert.assertEquals(1,requestProto.getRunningApplications().size());
  Assert.assertEquals(appId,requestProto.getRunningApplications().get(0));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Round-trips an NMContainerStatus through its protobuf representation and
 * verifies every field is preserved.
 */
@Test public void testNMContainerStatus(){
  ApplicationId appId = ApplicationId.newInstance(123456789, 1);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId = ContainerId.newInstance(attemptId, 1);
  Resource allocated = Resource.newInstance(1000, 200);
  NMContainerStatus original = NMContainerStatus.newInstance(containerId,
      ContainerState.COMPLETE, allocated, "diagnostics",
      ContainerExitStatus.ABORTED, Priority.newInstance(10), 1234);
  // Serialize to proto and parse back through the PB implementation.
  NMContainerStatus restored =
      new NMContainerStatusPBImpl(((NMContainerStatusPBImpl)original).getProto());
  Assert.assertEquals("diagnostics", restored.getDiagnostics());
  Assert.assertEquals(allocated, restored.getAllocatedResource());
  Assert.assertEquals(ContainerExitStatus.ABORTED, restored.getContainerExitStatus());
  Assert.assertEquals(ContainerState.COMPLETE, restored.getContainerState());
  Assert.assertEquals(containerId, restored.getContainerId());
  Assert.assertEquals(Priority.newInstance(10), restored.getPriority());
  Assert.assertEquals(1234, restored.getCreationTime());
}
InternalCallVerifier EqualityVerifier
@Test public void testRegisterNodeManagerRequest(){
  // Build a node-manager registration request, round-trip it through its
  // protobuf form, and check container statuses and running apps survive.
  ApplicationId firstApp=ApplicationId.newInstance(1234L,1);
  NMContainerStatus status=NMContainerStatus.newInstance(ContainerId.newInstance(ApplicationAttemptId.newInstance(firstApp,1),1),ContainerState.RUNNING,Resource.newInstance(1024,1),"good",-1,Priority.newInstance(0),1234);
  RegisterNodeManagerRequest original=RegisterNodeManagerRequest.newInstance(NodeId.newInstance("host",1234),1234,Resource.newInstance(0,0),"version",Arrays.asList(status),Arrays.asList(firstApp,ApplicationId.newInstance(1234L,2)));
  RegisterNodeManagerRequest copy=new RegisterNodeManagerRequestPBImpl(((RegisterNodeManagerRequestPBImpl)original).getProto());
  Assert.assertEquals(copy.getNMContainerStatuses().size(),original.getNMContainerStatuses().size());
  Assert.assertEquals(copy.getNMContainerStatuses().get(0).getContainerId(),original.getNMContainerStatuses().get(0).getContainerId());
  Assert.assertEquals(copy.getRunningApplications().size(),original.getRunningApplications().size());
  Assert.assertEquals(copy.getRunningApplications().get(0),original.getRunningApplications().get(0));
  Assert.assertEquals(copy.getRunningApplications().get(1),original.getRunningApplications().get(1));
}
InternalCallVerifier EqualityVerifier
@Test public void testRegisterNodeManagerRequestWithNullArrays(){
  // Null container-status and running-application lists must come back from
  // the protobuf round trip as empty lists, never null.
  RegisterNodeManagerRequest original=RegisterNodeManagerRequest.newInstance(NodeId.newInstance("host",1234),1234,Resource.newInstance(0,0),"version",null,null);
  RegisterNodeManagerRequest copy=new RegisterNodeManagerRequestPBImpl(((RegisterNodeManagerRequestPBImpl)original).getProto());
  Assert.assertEquals(0,copy.getNMContainerStatuses().size());
  Assert.assertEquals(0,copy.getRunningApplications().size());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testRoundTrip() throws Exception {
  // Populate a RegisterNodeManagerResponse with both master keys and a node
  // action, then serialize/deserialize and verify nothing is lost.
  RegisterNodeManagerResponse original=recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
  byte[] keyBytes={0,1,2,3,4,5};
  MasterKey containerKey=recordFactory.newRecordInstance(MasterKey.class);
  containerKey.setKeyId(54321);
  containerKey.setBytes(ByteBuffer.wrap(keyBytes));
  original.setContainerTokenMasterKey(containerKey);
  MasterKey nmKey=recordFactory.newRecordInstance(MasterKey.class);
  nmKey.setKeyId(12345);
  nmKey.setBytes(ByteBuffer.wrap(keyBytes));
  original.setNMTokenMasterKey(nmKey);
  original.setNodeAction(NodeAction.NORMAL);
  // Sanity-check the in-memory record before serialization.
  assertEquals(NodeAction.NORMAL,original.getNodeAction());
  assertNotNull(original.getContainerTokenMasterKey());
  assertEquals(54321,original.getContainerTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,original.getContainerTokenMasterKey().getBytes().array());
  // Container-token master key must survive the round trip.
  RegisterNodeManagerResponse copy=serDe(original);
  assertEquals(NodeAction.NORMAL,copy.getNodeAction());
  assertNotNull(copy.getContainerTokenMasterKey());
  assertEquals(54321,copy.getContainerTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,copy.getContainerTokenMasterKey().getBytes().array());
  // Same checks for the NM-token master key.
  assertNotNull(original.getNMTokenMasterKey());
  assertEquals(12345,original.getNMTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,original.getNMTokenMasterKey().getBytes().array());
  copy=serDe(original);
  assertEquals(NodeAction.NORMAL,copy.getNodeAction());
  assertNotNull(copy.getNMTokenMasterKey());
  assertEquals(12345,copy.getNMTokenMasterKey().getKeyId());
  assertArrayEquals(keyBytes,copy.getNMTokenMasterKey().getBytes().array());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testApplications() throws IOException, YarnException {
  // Write start/finish history for two applications, then fetch them through
  // the history server's client handler and verify both are returned.
  ApplicationId appId=ApplicationId.newInstance(0,1);  // redundant null-then-assign removed
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  ApplicationId appId1=ApplicationId.newInstance(0,2);
  writeApplicationStartData(appId1);
  writeApplicationFinishData(appId1);
  GetApplicationsRequest request=GetApplicationsRequest.newInstance();
  GetApplicationsResponse response=historyServer.getClientService().getClientHandler().getApplications(request);
  // Parameterized type restored: a raw List would not compile with the
  // typed getApplicationId() calls below.
  List<ApplicationReport> appReport=response.getApplicationList();
  Assert.assertNotNull(appReport);
  // The test expects appId (0,1) at index 0 and appId1 (0,2) at index 1.
  Assert.assertEquals(appId,appReport.get(0).getApplicationId());
  Assert.assertEquals(appId1,appReport.get(1).getApplicationId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testContainers() throws IOException, YarnException {
  // Record history for two containers of one attempt and verify the client
  // handler returns both; the test expects the second container at index 0.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
  ContainerId containerId1=ContainerId.newInstance(appAttemptId,2);
  writeContainerStartData(containerId);
  writeContainerFinishData(containerId);
  writeContainerStartData(containerId1);
  writeContainerFinishData(containerId1);
  writeApplicationFinishData(appId);
  GetContainersRequest request=GetContainersRequest.newInstance(appAttemptId);
  GetContainersResponse response=historyServer.getClientService().getClientHandler().getContainers(request);
  // Parameterized type restored: a raw List would not compile with the
  // typed getContainerId() calls below.
  List<ContainerReport> containers=response.getContainerList();
  Assert.assertNotNull(containers);
  Assert.assertEquals(containerId,containers.get(1).getContainerId());
  Assert.assertEquals(containerId1,containers.get(0).getContainerId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationAttemptReport() throws IOException, YarnException {
  // Store start/finish data for one attempt and verify the report the
  // client handler returns identifies that attempt.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  writeApplicationAttemptStartData(attemptId);
  writeApplicationAttemptFinishData(attemptId);
  GetApplicationAttemptReportRequest request=GetApplicationAttemptReportRequest.newInstance(attemptId);
  GetApplicationAttemptReportResponse response=historyServer.getClientService().getClientHandler().getApplicationAttemptReport(request);
  ApplicationAttemptReport report=response.getApplicationAttemptReport();
  Assert.assertNotNull(report);
  Assert.assertEquals("appattempt_0_0001_000001",report.getApplicationAttemptId().toString());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationAttempts() throws IOException, YarnException {
  // Persist two attempts of one application and verify both come back from
  // the client handler in the expected order.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  ApplicationAttemptId appAttemptId1=ApplicationAttemptId.newInstance(appId,2);
  writeApplicationAttemptStartData(appAttemptId);
  writeApplicationAttemptFinishData(appAttemptId);
  writeApplicationAttemptStartData(appAttemptId1);
  writeApplicationAttemptFinishData(appAttemptId1);
  GetApplicationAttemptsRequest request=GetApplicationAttemptsRequest.newInstance(appId);
  GetApplicationAttemptsResponse response=historyServer.getClientService().getClientHandler().getApplicationAttempts(request);
  // Parameterized type restored: a raw List would not compile with the
  // typed getApplicationAttemptId() calls below.
  List<ApplicationAttemptReport> attemptReports=response.getApplicationAttemptList();
  Assert.assertNotNull(attemptReports);
  Assert.assertEquals(appAttemptId,attemptReports.get(0).getApplicationAttemptId());
  Assert.assertEquals(appAttemptId1,attemptReports.get(1).getApplicationAttemptId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationReport() throws IOException, YarnException {
  // Write start+finish history for one application and verify the report
  // returned by the history client handler carries the fixture's metadata.
  ApplicationId appId=ApplicationId.newInstance(0,1);  // redundant null-then-assign removed
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  GetApplicationReportRequest request=GetApplicationReportRequest.newInstance(appId);
  GetApplicationReportResponse response=historyServer.getClientService().getClientHandler().getApplicationReport(request);
  ApplicationReport appReport=response.getApplicationReport();
  Assert.assertNotNull(appReport);
  Assert.assertEquals("application_0_0001",appReport.getApplicationId().toString());
  // "test type"/"test queue" are the values the write helpers store.
  Assert.assertEquals("test type",appReport.getApplicationType().toString());
  Assert.assertEquals("test queue",appReport.getQueue().toString());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testContainerReport() throws IOException, YarnException {
  // Persist one container's history and verify the returned container
  // report, including the expected log URL.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  ContainerId containerId=ContainerId.newInstance(attemptId,1);
  writeContainerStartData(containerId);
  writeContainerFinishData(containerId);
  writeApplicationFinishData(appId);
  GetContainerReportRequest request=GetContainerReportRequest.newInstance(containerId);
  GetContainerReportResponse response=historyServer.getClientService().getClientHandler().getContainerReport(request);
  ContainerReport report=response.getContainerReport();
  Assert.assertNotNull(report);
  Assert.assertEquals(containerId,report.getContainerId());
  Assert.assertEquals(expectedLogUrl,report.getLogUrl());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testApplicationReport() throws IOException, YarnException {
  // Verify the history manager builds an ApplicationReport whose current
  // attempt and host reflect the stored attempt data.
  ApplicationId appId=ApplicationId.newInstance(0,1);  // redundant null-then-assign removed
  writeApplicationStartData(appId);
  writeApplicationFinishData(appId);
  ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
  writeApplicationAttemptStartData(appAttemptId);
  writeApplicationAttemptFinishData(appAttemptId);
  ApplicationReport appReport=applicationHistoryManagerImpl.getApplication(appId);
  Assert.assertNotNull(appReport);
  Assert.assertEquals(appId,appReport.getApplicationId());
  Assert.assertEquals(appAttemptId,appReport.getCurrentApplicationAttemptId());
  // The write helper stores the attempt id string as the host.
  Assert.assertEquals(appAttemptId.toString(),appReport.getHost());
  Assert.assertEquals("test type",appReport.getApplicationType().toString());
  Assert.assertEquals("test queue",appReport.getQueue().toString());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=50000) public void testStartStopServer() throws Exception {
  // Walk the ApplicationHistoryServer through init/start/stop and verify
  // the lifecycle state of the server and its embedded client service.
  historyServer=new ApplicationHistoryServer();
  Configuration conf=new YarnConfiguration();
  historyServer.init(conf);
  assertEquals(STATE.INITED,historyServer.getServiceState());
  assertEquals(4,historyServer.getServices().size());
  ApplicationHistoryClientService clientService=historyServer.getClientService();
  assertNotNull(historyServer.getClientService());
  assertEquals(STATE.INITED,clientService.getServiceState());
  historyServer.start();
  assertEquals(STATE.STARTED,historyServer.getServiceState());
  assertEquals(STATE.STARTED,clientService.getServiceState());
  historyServer.stop();
  assertEquals(STATE.STOPPED,historyServer.getServiceState());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=50000) public void testFilteOverrides() throws Exception {
  // Whatever combination of HTTP filter initializers is configured, the
  // history server must normalize the setting to the timeline initializer.
  // (Method name typo "Filte" kept: external callers/reporting may rely on it.)
  String authFilter=AuthenticationFilterInitializer.class.getName();
  String timelineFilter=TimelineAuthenticationFilterInitializer.class.getName();
  String[] filterInitializers={authFilter,timelineFilter,authFilter + "," + timelineFilter,authFilter + ", " + timelineFilter};
  for ( String filterInitializer : filterInitializers) {
    historyServer=new ApplicationHistoryServer();
    Configuration conf=new YarnConfiguration();
    conf.set("hadoop.http.filter.initializers",filterInitializer);
    historyServer.init(conf);
    historyServer.start();
    Configuration effective=historyServer.getConfig();
    assertEquals(timelineFilter,effective.get("hadoop.http.filter.initializers"));
    historyServer.stop();
    AHSWebApp.resetInstance();
  }
}
InternalCallVerifier BooleanVerifier
@Test public void testMassiveWriteContainerHistoryData() throws IOException {
  // Write history for 100k containers and assert the on-disk growth of the
  // working path stays under 20 MB.
  LOG.info("Starting testMassiveWriteContainerHistoryData");
  long mb=1024 * 1024;
  long diskUsedBefore=fs.getContentSummary(fsWorkingPath).getLength() / mb;
  ApplicationId appId=ApplicationId.newInstance(0,1);
  writeApplicationStartData(appId);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  for (int c=1; c <= 100000; ++c) {
    ContainerId containerId=ContainerId.newInstance(attemptId,c);
    writeContainerStartData(containerId);
    writeContainerFinishData(containerId);
  }
  writeApplicationFinishData(appId);
  long diskUsedAfter=fs.getContentSummary(fsWorkingPath).getLength() / mb;
  Assert.assertTrue((diskUsedAfter - diskUsedBefore) < 20);
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testReadWriteApplicationAttemptHistory() throws Exception {
// Exercises the attempt-history store protocol: finish-before-start is
// rejected, start/finish pairs round-trip, and duplicate writes fail.
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
// Writing finish data before start data must be rejected by the store.
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
}
// Write start+finish for five attempts of the same application.
int numAppAttempts=5;
writeApplicationStartData(appId);
for (int i=1; i <= numAppAttempts; ++i) {
appAttemptId=ApplicationAttemptId.newInstance(appId,i);
writeApplicationAttemptStartData(appAttemptId);
writeApplicationAttemptFinishData(appAttemptId);
}
Assert.assertEquals(numAppAttempts,store.getApplicationAttempts(appId).size());
// Read each attempt back; the write helpers store the attempt id string
// as both host and diagnostics info.
for (int i=1; i <= numAppAttempts; ++i) {
appAttemptId=ApplicationAttemptId.newInstance(appId,i);
ApplicationAttemptHistoryData data=store.getApplicationAttempt(appAttemptId);
Assert.assertNotNull(data);
Assert.assertEquals(appAttemptId.toString(),data.getHost());
Assert.assertEquals(appAttemptId.toString(),data.getDiagnosticsInfo());
}
writeApplicationFinishData(appId);
// Re-writing an already-stored attempt (start or finish) must fail.
appAttemptId=ApplicationAttemptId.newInstance(appId,1);
try {
writeApplicationAttemptStartData(appAttemptId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeApplicationAttemptFinishData(appAttemptId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testReadWriteApplicationHistory() throws Exception {
// Exercises the application-history store protocol: finish-before-start is
// rejected, start/finish pairs round-trip, and duplicate writes fail.
ApplicationId appId=ApplicationId.newInstance(0,1);
// Writing finish data before start data must be rejected by the store.
try {
writeApplicationFinishData(appId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
}
// Write start+finish for five distinct applications.
int numApps=5;
for (int i=1; i <= numApps; ++i) {
appId=ApplicationId.newInstance(0,i);
writeApplicationStartData(appId);
writeApplicationFinishData(appId);
}
Assert.assertEquals(numApps,store.getAllApplications().size());
// Read each application back; the write helpers store the app id string
// as both name and diagnostics info.
for (int i=1; i <= numApps; ++i) {
appId=ApplicationId.newInstance(0,i);
ApplicationHistoryData data=store.getApplication(appId);
Assert.assertNotNull(data);
Assert.assertEquals(appId.toString(),data.getApplicationName());
Assert.assertEquals(appId.toString(),data.getDiagnosticsInfo());
}
// Re-writing an already-stored application (start or finish) must fail.
appId=ApplicationId.newInstance(0,1);
try {
writeApplicationStartData(appId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeApplicationFinishData(appId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testReadWriteContainerHistory() throws Exception {
// Exercises the container-history store protocol: finish-before-start is
// rejected, start/finish pairs round-trip, the AM container is retrievable,
// and duplicate writes fail.
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
// Writing finish data before start data must be rejected by the store.
try {
writeContainerFinishData(containerId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is stored before the start information"));
}
writeApplicationAttemptStartData(appAttemptId);
// Write start+finish for five containers under the same attempt.
int numContainers=5;
for (int i=1; i <= numContainers; ++i) {
containerId=ContainerId.newInstance(appAttemptId,i);
writeContainerStartData(containerId);
writeContainerFinishData(containerId);
}
Assert.assertEquals(numContainers,store.getContainers(appAttemptId).size());
// Read each container back; the write helpers store the container's own id
// as its priority and its id string as diagnostics info.
for (int i=1; i <= numContainers; ++i) {
containerId=ContainerId.newInstance(appAttemptId,i);
ContainerHistoryData data=store.getContainer(containerId);
Assert.assertNotNull(data);
Assert.assertEquals(Priority.newInstance(containerId.getId()),data.getPriority());
Assert.assertEquals(containerId.toString(),data.getDiagnosticsInfo());
}
// Container 1 is expected to be reported as the AM (master) container.
ContainerHistoryData masterContainer=store.getAMContainer(appAttemptId);
Assert.assertNotNull(masterContainer);
Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),masterContainer.getContainerId());
writeApplicationAttemptFinishData(appAttemptId);
// Re-writing an already-stored container (start or finish) must fail.
containerId=ContainerId.newInstance(appAttemptId,1);
try {
writeContainerStartData(containerId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
try {
writeContainerFinishData(containerId);
Assert.fail();
}
catch ( IOException e) {
Assert.assertTrue(e.getMessage().contains("is already stored"));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAppControllerIndex() throws Exception {
  // Rendering the AHS index page must set the page title.
  ApplicationHistoryManager historyManager=mock(ApplicationHistoryManager.class);
  Injector injector=WebAppTests.createMockInjector(ApplicationHistoryManager.class,historyManager);
  AHSController controller=injector.getInstance(AHSController.class);
  controller.index();
  Assert.assertEquals("Application History",controller.get(TITLE,"unknown"));
}
InternalCallVerifier EqualityVerifier
@Test public void testSingleApp() throws Exception {
  // Fetch a single application over the AHS REST API and validate the JSON
  // fields against the values the test fixture wrote.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject entity=clientResponse.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  JSONObject app=entity.getJSONObject("app");
  assertEquals(appId.toString(),app.getString("appId"));
  assertEquals(appId.toString(),app.get("name"));
  assertEquals(appId.toString(),app.get("diagnosticsInfo"));
  assertEquals("test queue",app.get("queue"));
  assertEquals("test user",app.get("user"));
  assertEquals("test type",app.get("type"));
  assertEquals(FinalApplicationStatus.UNDEFINED.toString(),app.get("finalAppStatus"));
  assertEquals(YarnApplicationState.FINISHED.toString(),app.get("appState"));
}
InternalCallVerifier EqualityVerifier
@Test public void testMultipleAttempts() throws Exception {
  // The appattempts REST endpoint should list all 5 attempts the fixture
  // wrote for this application.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject entity=clientResponse.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  JSONObject attemptsJson=entity.getJSONObject("appAttempts");
  assertEquals("incorrect number of elements",1,attemptsJson.length());
  JSONArray attemptArray=attemptsJson.getJSONArray("appAttempt");
  assertEquals("incorrect number of elements",5,attemptArray.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testMultipleContainers() throws Exception {
  // The containers REST endpoint should list all 5 containers the fixture
  // wrote for this attempt.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(attemptId.toString()).path("containers").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject entity=clientResponse.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  JSONObject containersJson=entity.getJSONObject("containers");
  assertEquals("incorrect number of elements",1,containersJson.length());
  JSONArray containerArray=containersJson.getJSONArray("container");
  assertEquals("incorrect number of elements",5,containerArray.length());
}
InternalCallVerifier EqualityVerifier
@Test public void testAppsQuery() throws Exception {
  // Querying apps filtered by FINISHED state should return the 5 fixture apps.
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("applicationhistory").path("apps").queryParam("state",YarnApplicationState.FINISHED.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject entity=clientResponse.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  JSONObject appsJson=entity.getJSONObject("apps");
  assertEquals("incorrect number of elements",1,appsJson.length());
  JSONArray appArray=appsJson.getJSONArray("app");
  assertEquals("incorrect number of elements",5,appArray.length());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSingleContainer() throws Exception {
// Fetches one container over the AHS REST API and validates every JSON
// field, including the fully-assembled log URL.
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(appAttemptId.toString()).path("containers").path(containerId.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject container=json.getJSONObject("container");
assertEquals(containerId.toString(),container.getString("containerId"));
assertEquals(containerId.toString(),container.getString("diagnosticsInfo"));
assertEquals("0",container.getString("allocatedMB"));
assertEquals("0",container.getString("allocatedVCores"));
assertEquals(NodeId.newInstance("localhost",0).toString(),container.getString("assignedNodeId"));
assertEquals(Priority.newInstance(containerId.getId()).toString(),container.getString("priority"));
Configuration conf=new YarnConfiguration();
// Log URL = scheme + AHS web address + /applicationhistory/logs/<node>/<container>/<container>/<user>.
assertEquals(WebAppUtils.getHttpSchemePrefix(conf) + WebAppUtils.getAHSWebAppURLWithoutScheme(conf) + "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/"+ "container_0_0001_01_000001/test user",container.getString("logUrl"));
assertEquals(ContainerState.COMPLETE.toString(),container.getString("containerState"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSingleAttempt() throws Exception {
  // Fetch a single application attempt over the AHS REST API and validate
  // its JSON fields against the fixture data.
  ApplicationId appId=ApplicationId.newInstance(0,1);
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(attemptId.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject entity=clientResponse.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements",1,entity.length());
  JSONObject attemptJson=entity.getJSONObject("appAttempt");
  assertEquals(attemptId.toString(),attemptJson.getString("appAttemptId"));
  assertEquals(attemptId.toString(),attemptJson.getString("host"));
  assertEquals(attemptId.toString(),attemptJson.getString("diagnosticsInfo"));
  assertEquals("test tracking url",attemptJson.getString("trackingUrl"));
  assertEquals(YarnApplicationAttemptState.FINISHED.toString(),attemptJson.get("appAttemptState"));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidUri2() throws JSONException, Exception {
  // A GET against the service root (no ws/v1 path) must 404 and must not
  // have produced a response body before failing.
  WebResource webResource=resource();
  String body="";
  try {
    body=webResource.accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse=ue.getResponse();
    assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidAccept() throws JSONException, Exception {
  // Requesting text/plain from a JSON-only endpoint must produce a 500 and
  // no response body.
  WebResource webResource=resource();
  String body="";
  try {
    body=webResource.path("ws").path("v1").path("applicationhistory").accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse=ue.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testInvalidUri() throws JSONException, Exception {
  // An unknown path under the service must 404 and produce no response body.
  WebResource webResource=resource();
  String body="";
  try {
    body=webResource.path("ws").path("v1").path("applicationhistory").path("bogus").accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse=ue.getResponse();
    assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=5000) public void testRunCommandwithPriority() throws Exception {
  // getRunCommand should honor the configured scheduling priority; on
  // non-Windows hosts the command is prefixed with "nice -n <priority>".
  // The duplicated verification block is factored into a helper.
  Configuration conf=new Configuration();
  conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,2);
  assertRunCommandPriority(ContainerExecutor.getRunCommand("echo","group1",conf),2);
  // Negative priorities must be passed through unchanged.
  conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,-5);
  assertRunCommandPriority(ContainerExecutor.getRunCommand("echo","group1",conf),-5);
}

/** Verifies the platform-specific run-command prefix for the given priority. */
private static void assertRunCommandPriority(String[] command,int priority){
  if (Shell.WINDOWS) {
    assertEquals("first command should be the run command for the platform",Shell.WINUTILS,command[0]);
  }
  else {
    assertEquals("first command should be nice","nice",command[0]);
    assertEquals("second command should be -n","-n",command[1]);
    assertEquals("third command should be the priority",Integer.toString(priority),command[2]);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testDirPermissions() throws Exception {
  // Verifies DefaultContainerExecutor creates user-cache, app-cache,
  // file-cache, app and log directories with the expected permissions,
  // even under a restrictive 077 umask.
  deleteTmpFiles();
  final String user="somebody";
  final String appId="app_12345_123";
  final FsPermission userCachePerm=new FsPermission(DefaultContainerExecutor.USER_PERM);
  final FsPermission appCachePerm=new FsPermission(DefaultContainerExecutor.APPCACHE_PERM);
  final FsPermission fileCachePerm=new FsPermission(DefaultContainerExecutor.FILECACHE_PERM);
  final FsPermission appDirPerm=new FsPermission(DefaultContainerExecutor.APPDIR_PERM);
  final FsPermission logDirPerm=new FsPermission(DefaultContainerExecutor.LOGDIR_PERM);
  // Parameterized list types restored: the raw List could not be iterated
  // with a String loop variable below.
  List<String> localDirs=new ArrayList<String>();
  localDirs.add(new Path(BASE_TMP_PATH,"localDirA").toString());
  localDirs.add(new Path(BASE_TMP_PATH,"localDirB").toString());
  List<String> logDirs=new ArrayList<String>();
  logDirs.add(new Path(BASE_TMP_PATH,"logDirA").toString());
  logDirs.add(new Path(BASE_TMP_PATH,"logDirB").toString());
  Configuration conf=new Configuration();
  // Restrictive umask: the executor must still produce its documented perms.
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
  FileContext lfs=FileContext.getLocalFSFileContext(conf);
  DefaultContainerExecutor executor=new DefaultContainerExecutor(lfs);
  executor.init();
  try {
    executor.createUserLocalDirs(localDirs,user);
    executor.createUserCacheDirs(localDirs,user);
    executor.createAppDirs(localDirs,user,appId);
    // usercache/<user> dirs
    for ( String dir : localDirs) {
      FileStatus stats=lfs.getFileStatus(new Path(new Path(dir,ContainerLocalizer.USERCACHE),user));
      Assert.assertEquals(userCachePerm,stats.getPermission());
    }
    // appcache, filecache, and per-app dirs under each usercache
    for ( String dir : localDirs) {
      Path userCachePath=new Path(new Path(dir,ContainerLocalizer.USERCACHE),user);
      Path appCachePath=new Path(userCachePath,ContainerLocalizer.APPCACHE);
      FileStatus stats=lfs.getFileStatus(appCachePath);
      Assert.assertEquals(appCachePerm,stats.getPermission());
      stats=lfs.getFileStatus(new Path(userCachePath,ContainerLocalizer.FILECACHE));
      Assert.assertEquals(fileCachePerm,stats.getPermission());
      stats=lfs.getFileStatus(new Path(appCachePath,appId));
      Assert.assertEquals(appDirPerm,stats.getPermission());
    }
    // per-app log dirs
    executor.createAppLogDirs(appId,logDirs);
    for ( String dir : logDirs) {
      FileStatus stats=lfs.getFileStatus(new Path(dir,appId));
      Assert.assertEquals(logDirPerm,stats.getPermission());
    }
  }
  finally {
    deleteTmpFiles();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier HybridVerifier
@Test public void testContainerLaunchError() throws IOException, InterruptedException {
// Launches a container whose script path points at a file that (presumably)
// cannot be executed from the work dir, and verifies the executor reports a
// non-zero exit code plus a "No such file or directory" diagnostic, both via
// log output and via a ContainerDiagnosticsUpdateEvent.
Path localDir=new Path(BASE_TMP_PATH,"localDir");
List localDirs=new ArrayList();
localDirs.add(localDir.toString());
List logDirs=new ArrayList();
Path logDir=new Path(BASE_TMP_PATH,"logDir");
logDirs.add(logDir.toString());
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir.toString());
conf.set(YarnConfiguration.NM_LOG_DIRS,logDir.toString());
FileContext lfs=FileContext.getLocalFSFileContext(conf);
// Spy so logOutput can be intercepted to inspect the diagnostics text.
DefaultContainerExecutor mockExec=spy(new DefaultContainerExecutor(lfs));
mockExec.setConf(conf);
doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
String diagnostics=(String)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("No such file or directory"));
return null;
}
}
).when(mockExec).logOutput(any(String.class));
String appSubmitter="nobody";
String appId="APP_ID";
String containerId="CONTAINER_ID";
Container container=mock(Container.class);
ContainerId cId=mock(ContainerId.class);
ContainerLaunchContext context=mock(ContainerLaunchContext.class);
HashMap env=new HashMap();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
try {
// The diagnostics event delivered to the container must also carry the
// "No such file or directory" message.
doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("No such file or directory"));
return null;
}
}
).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
when(cId.toString()).thenReturn(containerId);
when(cId.getApplicationAttemptId()).thenReturn(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),0));
when(context.getEnvironment()).thenReturn(env);
// Set up the local/log directory tree the executor expects.
mockExec.createUserLocalDirs(localDirs,appSubmitter);
mockExec.createUserCacheDirs(localDirs,appSubmitter);
mockExec.createAppDirs(localDirs,appSubmitter,appId);
mockExec.createAppLogDirs(appId,logDirs);
Path scriptPath=new Path("file:///bin/echo");
Path tokensPath=new Path("file:///dev/null");
Path workDir=localDir;
Path pidFile=new Path(workDir,"pid.txt");
mockExec.init();
mockExec.activateContainer(cId,pidFile);
int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,localDirs,localDirs);
// The launch is expected to fail with a non-zero exit code.
Assert.assertNotSame(0,ret);
}
finally {
// Clean up as the submitting user regardless of outcome.
mockExec.deleteAsUser(appSubmitter,localDir);
mockExec.deleteAsUser(appSubmitter,logDir);
}
}
InternalCallVerifier BooleanVerifier
// Verifies that DeletionService removes absolute paths: 20 randomly generated
// dirs are scheduled for deletion (alternating a null and a "dingo" user based
// on dir-name parity) and each must disappear within the polling window.
@Test public void testAbsDelete() throws Exception {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Print the seed so a failing randomized run can be reproduced.
System.out.println("SEED: " + seed);
List dirs=buildDirs(r,base,20);
createDirs(new Path("."),dirs);
FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
Configuration conf=new Configuration();
exec.setConf(conf);
DeletionService del=new DeletionService(exec);
del.init(conf);
del.start();
try {
for ( Path p : dirs) {
// Dir names are numeric (from buildDirs); parity selects the deleting user.
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,null);
}
// Shared 20s budget across all dirs: deletions run asynchronously.
int msecToWait=20 * 1000;
for ( Path p : dirs) {
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(p));
}
}
finally {
// Always stop the service so its executor threads do not leak into other tests.
del.stop();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Stopping the service must still terminate its executor even when a pending
 * delete is held back by a 60-second debug delay.
 */
@Test public void testStopWithDelayedTasks() throws Exception {
  Configuration delayedConf = new YarnConfiguration();
  // Delay every deletion long enough that it is still queued when we stop.
  delayedConf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 60);
  DeletionService service = new DeletionService(Mockito.mock(ContainerExecutor.class));
  try {
    service.init(delayedConf);
    service.start();
    service.delete("dingo", new Path("/does/not/exist"));
  } finally {
    service.stop();
  }
  // stop() must shut the scheduler down despite the delayed task.
  assertTrue(service.isTerminated());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies deletion of RELATIVE paths: each content path is resolved against
// every base dir passed as the varargs baseDirs, so one delete() call must
// remove the path from all four base dirs.
@Test public void testRelativeDelete() throws Exception {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Print the seed so a failing randomized run can be reproduced.
System.out.println("SEED: " + seed);
List baseDirs=buildDirs(r,base,4);
createDirs(new Path("."),baseDirs);
// Relative content dirs, replicated under every base dir.
List content=buildDirs(r,new Path("."),10);
for ( Path b : baseDirs) {
createDirs(b,content);
}
DeletionService del=new DeletionService(new FakeDefaultContainerExecutor());
try {
del.init(new Configuration());
del.start();
for ( Path p : content) {
// Sanity: content exists under at least the first base dir before deleting.
assertTrue(lfs.util().exists(new Path(baseDirs.get(0),p)));
// Parity of the numeric dir name selects a null vs. "dingo" user.
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,baseDirs.toArray(new Path[4]));
}
// Shared 20s budget: poll every (base, content) combination until gone.
int msecToWait=20 * 1000;
for ( Path p : baseDirs) {
for ( Path q : content) {
Path fp=new Path(p,q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(fp));
}
}
}
finally {
del.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies FileDeletionTask dependency chaining: a parent-dir deletion task
// only runs after the sub-dir tasks it depends on complete, and is skipped
// entirely if any dependency is marked unsuccessful.
@Test(timeout=60000) public void testFileDeletionTaskDependency() throws Exception {
FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
Configuration conf=new Configuration();
exec.setConf(conf);
DeletionService del=new DeletionService(exec);
del.init(conf);
del.start();
try {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Print the seed so a failing randomized run can be reproduced.
System.out.println("SEED: " + seed);
List dirs=buildDirs(r,base,2);
createDirs(new Path("."),dirs);
// Case 1: all sub-dir deletions succeed, so the dependent parent-dir
// task must eventually fire and remove dirs.get(0) as well.
List subDirs=buildDirs(r,dirs.get(0),2);
FileDeletionTask dependentDeletionTask=del.createFileDeletionTask(null,dirs.get(0),new Path[]{});
List deletionTasks=new ArrayList();
for ( Path subDir : subDirs) {
FileDeletionTask deletionTask=del.createFileDeletionTask(null,null,new Path[]{subDir});
// Parent task runs only after this sub-dir task finishes.
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
for ( FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
int msecToWait=20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(dirs.get(0)))) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(dirs.get(0)));
// Case 2: one dependency (the "absentFile" task) is forced to fail, so
// the dependent parent-dir task must NOT delete dirs.get(1).
subDirs=buildDirs(r,dirs.get(1),2);
subDirs.add(new Path(dirs.get(1),"absentFile"));
dependentDeletionTask=del.createFileDeletionTask(null,dirs.get(1),new Path[]{});
deletionTasks=new ArrayList();
for ( Path subDir : subDirs) {
FileDeletionTask deletionTask=del.createFileDeletionTask(null,null,new Path[]{subDir});
deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
deletionTasks.add(deletionTask);
}
// Index 2 is the absentFile task; marking it unsuccessful poisons the chain.
deletionTasks.get(2).setSuccess(false);
for ( FileDeletionTask task : deletionTasks) {
del.scheduleFileDeletionTask(task);
}
// Wait until the two real sub-dirs are gone, then confirm the parent survived.
msecToWait=20 * 1000;
while (msecToWait > 0 && (lfs.util().exists(subDirs.get(0)) || lfs.util().exists(subDirs.get(1)))) {
Thread.sleep(100);
msecToWait-=100;
}
assertTrue(lfs.util().exists(dirs.get(1)));
}
finally {
del.stop();
}
}
InternalCallVerifier BooleanVerifier
// Verifies the debug "never delete" mode: with DEBUG_NM_DELETE_DELAY_SEC set
// to -1, scheduled deletions are suppressed and every dir must still exist
// after the polling window.
@Test public void testNoDelete() throws Exception {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Print the seed so a failing randomized run can be reproduced.
System.out.println("SEED: " + seed);
List dirs=buildDirs(r,base,20);
createDirs(new Path("."),dirs);
FakeDefaultContainerExecutor exec=new FakeDefaultContainerExecutor();
Configuration conf=new Configuration();
// -1 disables deletion entirely (debug setting).
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC,-1);
exec.setConf(conf);
DeletionService del=new DeletionService(exec);
try {
del.init(conf);
del.start();
for ( Path p : dirs) {
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,null);
}
int msecToWait=20 * 1000;
for ( Path p : dirs) {
// The wait loop exits immediately per-dir while the dir exists... note
// it only spins if the dir vanished-and-reappeared; the key check is below.
while (msecToWait > 0 && lfs.util().exists(p)) {
Thread.sleep(100);
msecToWait-=100;
}
// Unlike testAbsDelete, dirs must SURVIVE because deletion is disabled.
assertTrue(lfs.util().exists(p));
}
}
finally {
del.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies state-store recovery: deletions scheduled with a 1s delay before
// the first service instance stops must be recovered and executed by a second
// DeletionService built on the same NMMemoryStateStoreService.
@Test public void testRecovery() throws Exception {
Random r=new Random();
long seed=r.nextLong();
r.setSeed(seed);
// Print the seed so a failing randomized run can be reproduced.
System.out.println("SEED: " + seed);
List baseDirs=buildDirs(r,base,4);
createDirs(new Path("."),baseDirs);
List content=buildDirs(r,new Path("."),10);
for ( Path b : baseDirs) {
createDirs(b,content);
}
Configuration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
// 1s delay ensures tasks are still pending (persisted) when we stop below.
conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC,1);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DeletionService del=new DeletionService(new FakeDefaultContainerExecutor(),stateStore);
try {
del.init(conf);
del.start();
for ( Path p : content) {
assertTrue(lfs.util().exists(new Path(baseDirs.get(0),p)));
del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",p,baseDirs.toArray(new Path[4]));
}
// Stop before the delayed tasks run, then restart against the same store:
// recovery must replay the persisted deletions.
del.stop();
del=new DeletionService(new FakeDefaultContainerExecutor(),stateStore);
del.init(conf);
del.start();
int msecToWait=10 * 1000;
for ( Path p : baseDirs) {
for ( Path q : content) {
Path fp=new Path(p,q);
while (msecToWait > 0 && lfs.util().exists(fp)) {
Thread.sleep(100);
msecToWait-=100;
}
assertFalse(lfs.util().exists(fp));
}
}
}
finally {
del.close();
stateStore.close();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies DirectoryCollection disk-usage cutoffs: a 0% utilization cutoff
// fails every dir, 100% passes, and a free-space floor equal to the disk's
// total size (in MB) fails because free space can never reach it.
@Test public void testDiskSpaceUtilizationLimit() throws IOException {
String dirA=new File(testDir,"dirA").getPath();
String[] dirs={dirA};
// 0% max utilization: any real disk exceeds it, so the dir must fail.
DirectoryCollection dc=new DirectoryCollection(dirs,0.0F);
dc.checkDirs();
Assert.assertEquals(0,dc.getGoodDirs().size());
Assert.assertEquals(1,dc.getFailedDirs().size());
// 100% max utilization: nothing can exceed it, so the dir must pass.
dc=new DirectoryCollection(dirs,100.0F);
dc.checkDirs();
Assert.assertEquals(1,dc.getGoodDirs().size());
Assert.assertEquals(0,dc.getFailedDirs().size());
// long argument selects the minimum-free-space (MB) constructor; requiring
// the whole disk to be free must fail the dir.
dc=new DirectoryCollection(dirs,testDir.getTotalSpace() / (1024 * 1024));
dc.checkDirs();
Assert.assertEquals(0,dc.getGoodDirs().size());
Assert.assertEquals(1,dc.getFailedDirs().size());
// Permissive on both axes (100% utilization, 0 MB free required): passes.
dc=new DirectoryCollection(dirs,100.0F,0);
dc.checkDirs();
Assert.assertEquals(1,dc.getGoodDirs().size());
Assert.assertEquals(0,dc.getFailedDirs().size());
}
InternalCallVerifier EqualityVerifier
// Verifies setter clamping: the percentage cutoff is clamped to [0, 100] and
// the space cutoff is clamped to be non-negative.
@Test public void testDiskLimitsCutoffSetters(){
String[] dirs={"dir"};
DirectoryCollection dc=new DirectoryCollection(dirs,0.0F,100);
float testValue=57.5F;
float delta=0.1F;
// In-range value is stored as-is.
dc.setDiskUtilizationPercentageCutoff(testValue);
Assert.assertEquals(testValue,dc.getDiskUtilizationPercentageCutoff(),delta);
// Negative percentage clamps to 0.
testValue=-57.5F;
dc.setDiskUtilizationPercentageCutoff(testValue);
Assert.assertEquals(0.0F,dc.getDiskUtilizationPercentageCutoff(),delta);
// Over-100 percentage clamps to 100.
testValue=157.5F;
dc.setDiskUtilizationPercentageCutoff(testValue);
Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta);
long spaceValue=57;
dc.setDiskUtilizationSpaceCutoff(spaceValue);
Assert.assertEquals(spaceValue,dc.getDiskUtilizationSpaceCutoff());
// Negative space cutoff clamps to 0.
spaceValue=-57;
dc.setDiskUtilizationSpaceCutoff(spaceValue);
Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff());
}
InternalCallVerifier EqualityVerifier
// Verifies every DirectoryCollection constructor overload sets the expected
// defaults (100% utilization, 0 MB free) and applies the same clamping as
// the setters for out-of-range arguments.
@Test public void testConstructors(){
String[] dirs={"dir"};
float delta=0.1F;
// Dirs-only constructor: both cutoffs at their permissive defaults.
DirectoryCollection dc=new DirectoryCollection(dirs);
Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta);
Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff());
// float overload sets only the percentage cutoff.
dc=new DirectoryCollection(dirs,57.5F);
Assert.assertEquals(57.5F,dc.getDiskUtilizationPercentageCutoff(),delta);
Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff());
// long overload sets only the space cutoff.
dc=new DirectoryCollection(dirs,57);
Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta);
Assert.assertEquals(57,dc.getDiskUtilizationSpaceCutoff());
// Two-arg overload sets both.
dc=new DirectoryCollection(dirs,57.5F,67);
Assert.assertEquals(57.5F,dc.getDiskUtilizationPercentageCutoff(),delta);
Assert.assertEquals(67,dc.getDiskUtilizationSpaceCutoff());
// Out-of-range arguments are clamped, not rejected.
dc=new DirectoryCollection(dirs,-57.5F,-67);
Assert.assertEquals(0.0F,dc.getDiskUtilizationPercentageCutoff(),delta);
Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff());
dc=new DirectoryCollection(dirs,157.5F,-67);
Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta);
Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies createNonExistentDirs: missing dirs (including nested dirB under
// dirA) are created with the umask-derived default permission, while a
// pre-existing dir (dirC, 0710) keeps its original permissions untouched.
@Test public void testCreateDirectories() throws IOException {
Configuration conf=new Configuration();
// 077 umask so the created-permission check is deterministic.
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext localFs=FileContext.getLocalFSFileContext(conf);
String dirA=new File(testDir,"dirA").getPath();
String dirB=new File(dirA,"dirB").getPath();
String dirC=new File(testDir,"dirC").getPath();
Path pathC=new Path(dirC);
// Pre-create dirC with a distinctive permission to detect unwanted changes.
FsPermission permDirC=new FsPermission((short)0710);
localFs.mkdir(pathC,null,true);
localFs.setPermission(pathC,permDirC);
String[] dirs={dirA,dirB,dirC};
DirectoryCollection dc=new DirectoryCollection(dirs,conf.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
FsPermission defaultPerm=FsPermission.getDefault().applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK));
boolean createResult=dc.createNonExistentDirs(localFs,defaultPerm);
Assert.assertTrue(createResult);
// dirA is created implicitly as dirB's parent and must get the same perms.
FileStatus status=localFs.getFileStatus(new Path(dirA));
Assert.assertEquals("local dir parent not created with proper permissions",defaultPerm,status.getPermission());
status=localFs.getFileStatus(new Path(dirB));
Assert.assertEquals("local dir not created with proper permissions",defaultPerm,status.getPermission());
// Existing dirC must keep its 0710 permission.
status=localFs.getFileStatus(pathC);
Assert.assertEquals("existing local directory permissions modified",permDirC,status.getPermission());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies that a list iterator obtained from getGoodDirs() stays valid while
// checkDirs() removes a bad entry (a file posing as a dir) — i.e. no
// ConcurrentModificationException on li.next(); presumably getGoodDirs()
// returns a copy-on-write/snapshot list — confirm against DirectoryCollection.
@Test public void testConcurrentAccess() throws IOException {
Configuration conf=new Configuration();
// testFile is a regular file, so checkDirs() must reject it as a local dir.
String[] dirs={testFile.getPath()};
DirectoryCollection dc=new DirectoryCollection(dirs,conf.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE));
List list=dc.getGoodDirs();
ListIterator li=list.listIterator();
Assert.assertTrue("checkDirs did not remove test file from directory list",dc.checkDirs());
// Must not throw despite the modification performed by checkDirs() above.
li.next();
}
InternalCallVerifier EqualityVerifier
// Verifies LinuxContainerExecutor.getRunAsUser across security modes:
// simple auth maps callers to the configured nonsecure local user (default or
// overridden), unless user-limiting is disabled; kerberos runs as the caller.
@Test public void testLocalUser() throws Exception {
try {
// Nonsecure mode, default mapping: every user runs as the default local user.
Configuration conf=new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"simple");
UserGroupInformation.setConfiguration(conf);
LinuxContainerExecutor lce=new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,lce.getRunAsUser("foo"));
// Nonsecure mode with a custom local user configured.
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,"bar");
lce=new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals("bar",lce.getRunAsUser("foo"));
// Nonsecure mode with user-limiting disabled: the caller's own name is used.
conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,"bar");
conf.setBoolean(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS,false);
lce=new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals("foo",lce.getRunAsUser("foo"));
// Secure (kerberos) mode: containers always run as the submitting user.
conf=new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
lce=new LinuxContainerExecutor();
lce.setConf(conf);
Assert.assertEquals("foo",lce.getRunAsUser("foo"));
}
finally {
// Restore simple auth so the static UGI state does not leak into other tests.
Configuration conf=new YarnConfiguration();
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"simple");
UserGroupInformation.setConfiguration(conf);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// End-to-end kill test against a real process: launches a blocking "sleep 100"
// container on a background thread, waits for its pid to appear, sends
// SIGTERM through the executor, and checks the launcher thread exits.
@Test public void testContainerKill() throws Exception {
if (!shouldRun()) {
return;
}
final ContainerId sleepId=getNextContainerId();
Thread t=new Thread(){
public void run(){
try {
// Blocks until the container (sleep 100) exits or is killed.
runAndBlock(sleepId,"sleep","100");
}
catch ( IOException e) {
LOG.warn("Caught exception while running sleep",e);
}
}
}
;
t.setDaemon(true);
t.start();
assertTrue(t.isAlive());
// Poll up to 10 * 200ms for the container pid to be registered.
String pid=null;
int count=10;
while ((pid=exec.getProcessId(sleepId)) == null && count > 0) {
LOG.info("Sleeping for 200 ms before checking for pid ");
Thread.sleep(200);
count--;
}
assertNotNull(pid);
LOG.info("Going to killing the process.");
exec.signalContainer(appSubmitter,pid,Signal.TERM);
LOG.info("sleeping for 100ms to let the sleep be killed");
// NOTE(review): fixed 100ms grace before checking thread death is timing
// sensitive and could flake on a loaded machine.
Thread.sleep(100);
assertFalse(t.isAlive());
}
InternalCallVerifier EqualityVerifier
// Verifies a successful launch through the mock container-executor script:
// exit code 0 and the exact argv recorded by the script (user, command id,
// app/container ids, dirs, pid file, cgroups flag) in order.
@Test public void testContainerLaunch() throws IOException {
String appSubmitter="nobody";
String cmd=String.valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue());
String appId="APP_ID";
String containerId="CONTAINER_ID";
// Minimal mocked container: only the id, launch context and env are consulted.
Container container=mock(Container.class);
ContainerId cId=mock(ContainerId.class);
ContainerLaunchContext context=mock(ContainerLaunchContext.class);
HashMap env=new HashMap();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
Path scriptPath=new Path("file:///bin/echo");
Path tokensPath=new Path("file:///dev/null");
Path workDir=new Path("/tmp");
Path pidFile=new Path(workDir,"pid.txt");
mockExec.activateContainer(cId,pidFile);
int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,dirsHandler.getLocalDirs(),dirsHandler.getLogDirs());
assertEquals(0,ret);
// readMockParams() yields the argv the mock executor script was invoked with.
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,appId,containerId,workDir.toString(),"/bin/echo","/dev/null",pidFile.toString(),StringUtils.join(",",dirsHandler.getLocalDirs()),StringUtils.join(",",dirsHandler.getLogDirs()),"cgroups=none"),readMockParams());
}
InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Verifies the failure path using a mock executor script that emits an error:
// launchContainer must return non-zero, and both the logged output and the
// container diagnostics event must carry the "badcommand" message.
@Test public void testContainerLaunchError() throws IOException {
File f=new File("./src/test/resources/mock-container-executer-with-error");
// Ensure the checked-in mock script is runnable on this machine.
if (!FileUtil.canExecute(f)) {
FileUtil.setExecutable(f,true);
}
String executorPath=f.getAbsolutePath();
Configuration conf=new Configuration();
conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,executorPath);
conf.set(YarnConfiguration.NM_LOCAL_DIRS,"file:///bin/echo");
conf.set(YarnConfiguration.NM_LOG_DIRS,"file:///dev/null");
// Spy so logOutput can be intercepted and its diagnostics asserted.
mockExec=spy(new LinuxContainerExecutor());
doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
String diagnostics=(String)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("badcommand"));
return null;
}
}
).when(mockExec).logOutput(any(String.class));
dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
mockExec.setConf(conf);
String appSubmitter="nobody";
String cmd=String.valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue());
String appId="APP_ID";
String containerId="CONTAINER_ID";
Container container=mock(Container.class);
ContainerId cId=mock(ContainerId.class);
ContainerLaunchContext context=mock(ContainerLaunchContext.class);
HashMap env=new HashMap();
when(container.getContainerId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
// The failed launch must push a diagnostics-update event with "badcommand".
doAnswer(new Answer(){
@Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable {
ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0];
assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("badcommand"));
return null;
}
}
).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class));
when(cId.toString()).thenReturn(containerId);
when(context.getEnvironment()).thenReturn(env);
Path scriptPath=new Path("file:///bin/echo");
Path tokensPath=new Path("file:///dev/null");
Path workDir=new Path("/tmp");
Path pidFile=new Path(workDir,"pid.txt");
mockExec.activateContainer(cId,pidFile);
int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,dirsHandler.getLocalDirs(),dirsHandler.getLogDirs());
// Any non-zero exit is acceptable; the script is expected to fail.
Assert.assertNotSame(0,ret);
// The argv passed to the failing script must still be fully formed.
assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,appId,containerId,workDir.toString(),"/bin/echo","/dev/null",pidFile.toString(),StringUtils.join(",",dirsHandler.getLocalDirs()),StringUtils.join(",",dirsHandler.getLogDirs()),"cgroups=none"),readMockParams());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that signalling a container forwards the run-as user, the
 * SIGNAL_CONTAINER command id, the pid, and the signal number to the mock
 * container-executor in that exact order.
 */
@Test public void testContainerKill() throws IOException {
  ContainerExecutor.Signal quit = ContainerExecutor.Signal.QUIT;
  String runAsUser = "nobody";
  mockExec.signalContainer(runAsUser, "1000", quit);
  String expectedCmd = String.valueOf(LinuxContainerExecutor.Commands.SIGNAL_CONTAINER.getValue());
  String expectedSignal = String.valueOf(quit.getValue());
  // readMockParams() yields the argv recorded by the mock executor script.
  assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER, runAsUser,
      expectedCmd, "1000", expectedSignal), readMockParams());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies LocalDirsHandlerService rejects non-local URIs: a hdfs:// local
// dir must make init() throw and leave the service un-inited (STOPPED).
@Test public void testValidPathsDirHandlerService(){
Configuration conf=new YarnConfiguration();
String localDir1=new File("file:///" + testDir,"localDir1").getPath();
// Invalid scheme for a local dir; this entry must trigger the failure.
String localDir2=new File("hdfs:///" + testDir,"localDir2").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir1 + "," + localDir2);
String logDir1=new File("file:///" + testDir,"logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS,logDir1);
LocalDirsHandlerService dirSvc=new LocalDirsHandlerService();
try {
dirSvc.init(conf);
Assert.fail("Service should have thrown an exception due to wrong URI");
}
catch ( YarnRuntimeException e) {
// Expected: init must reject the hdfs:// local dir.
}
Assert.assertEquals("Service should not be inited",STATE.STOPPED,dirSvc.getServiceState());
}
InternalCallVerifier EqualityVerifier
// Verifies that a single valid file:// local dir is accepted by
// LocalDirsHandlerService and reported by getLocalDirs().
@Test public void testDirStructure() throws Exception {
Configuration conf=new YarnConfiguration();
String localDir1=new File("file:///" + testDir,"localDir1").getPath();
conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir1);
String logDir1=new File("file:///" + testDir,"logDir1").getPath();
conf.set(YarnConfiguration.NM_LOG_DIRS,logDir1);
LocalDirsHandlerService dirSvc=new LocalDirsHandlerService();
dirSvc.init(conf);
// Exactly the one configured local dir should be registered.
Assert.assertEquals(1,dirSvc.getLocalDirs().size());
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies NodeHealthScriptRunner.shouldRun gating: the health script must
 * not run with a default configuration, nor while the script file is missing
 * or not executable, and must run once an executable script exists.
 *
 * @throws IOException if the config or script file cannot be written
 */
@Test public void testNodeHealthScriptShouldRun() throws IOException {
Assert.assertFalse("By default Health script should not have started",NodeHealthScriptRunner.shouldRun(new Configuration()));
Configuration conf=getConfForNodeHealthScript();
// Script path is configured but the file does not exist yet.
Assert.assertFalse("Node health script should not start when the script file is absent",NodeHealthScriptRunner.shouldRun(conf));
// Close the stream explicitly: the original leaked the FileOutputStream.
try (FileOutputStream confStream=new FileOutputStream(nodeHealthConfigFile)) {
conf.writeXml(confStream);
}
conf.addResource(nodeHealthConfigFile.getName());
// Script exists but is not executable: still must not run.
writeNodeHealthScriptFile("",false);
Assert.assertFalse("Node health script should not start when the script is not executable",NodeHealthScriptRunner.shouldRun(conf));
// Executable script present: the runner should now start.
writeNodeHealthScriptFile("",true);
Assert.assertTrue("Node health script should start",NodeHealthScriptRunner.shouldRun(conf));
}
InternalCallVerifier BooleanVerifier
/**
 * Drives the node health script through its state transitions by rewriting
 * the script between manual TimerTask runs: healthy, healthy->unhealthy
 * (ERROR output), unhealthy->healthy, and healthy->timeout (script sleeps
 * past the configured timeout).
 *
 * @throws Exception on config/script I/O failure or interrupted sleep
 */
@Test public void testNodeHealthScript() throws Exception {
RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
NodeHealthStatus healthStatus=factory.newRecordInstance(NodeHealthStatus.class);
// Output starting with ERROR marks the node unhealthy.
String errorScript="echo ERROR\n echo \"Tracker not healthy\"";
String normalScript="echo \"I am all fine\"";
// Sleeps long enough to exceed the script timeout on either platform.
String timeOutScript=Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\"" : "sleep 4\necho \"I am fine\"";
Configuration conf=getConfForNodeHealthScript();
// Close the stream explicitly: the original leaked the FileOutputStream.
try (FileOutputStream confStream=new FileOutputStream(nodeHealthConfigFile)) {
conf.writeXml(confStream);
}
conf.addResource(nodeHealthConfigFile.getName());
writeNodeHealthScriptFile(normalScript,true);
NodeHealthCheckerService nodeHealthChecker=new NodeHealthCheckerService();
nodeHealthChecker.init(conf);
NodeHealthScriptRunner nodeHealthScriptRunner=nodeHealthChecker.getNodeHealthScriptRunner();
TimerTask timerTask=nodeHealthScriptRunner.getTimerTask();
// Run the check synchronously instead of waiting on the timer schedule.
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking initial healthy condition");
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// Swap in the failing script and re-run: healthy -> unhealthy.
writeNodeHealthScriptFile(errorScript,true);
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking Healthy--->Unhealthy");
Assert.assertFalse("Node health status reported healthy",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node health status reported healthy",healthStatus.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// Restore the good script: unhealthy -> healthy.
writeNodeHealthScriptFile(normalScript,true);
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking UnHealthy--->healthy");
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node health status reported unhealthy",healthStatus.getHealthReport().equals(nodeHealthChecker.getHealthReport()));
// A script that exceeds its timeout must also mark the node unhealthy,
// with the timed-out message prepended to the disks health report.
writeNodeHealthScriptFile(timeOutScript,true);
timerTask.run();
setHealthStatus(healthStatus,nodeHealthChecker.isHealthy(),nodeHealthChecker.getHealthReport(),nodeHealthChecker.getLastHealthReportTime());
LOG.info("Checking Healthy--->timeout");
Assert.assertFalse("Node health status reported healthy even after timeout",healthStatus.getIsNodeHealthy());
Assert.assertTrue("Node script time out message not propogated",healthStatus.getHealthReport().equals(NodeHealthScriptRunner.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG + NodeHealthCheckerService.SEPARATOR + nodeHealthChecker.getDiskHandler().getDisksHealthReport()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Integration test: runs a container so usercache/filecache/nm_private get
// populated, then restarts the NodeManager and verifies every local dir is
// cleaned on startup via the expected DeletionService calls.
@Test(timeout=2000000) public void testClearLocalDirWhenNodeReboot() throws IOException, YarnException, InterruptedException {
nm=new MyNodeManager();
nm.start();
final ContainerManagementProtocol containerManager=nm.getContainerManager();
// Pre-seed the filecache with files so the reboot cleanup has work to do.
createFiles(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE,100);
localResourceDir.mkdirs();
ContainerLaunchContext containerLaunchContext=Records.newRecord(ContainerLaunchContext.class);
ContainerId cId=createContainerId();
// Localize the resource dir as an APPLICATION-visibility FILE resource.
URL localResourceUri=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(localResourceDir.getAbsolutePath())));
LocalResource localResource=LocalResource.newInstance(localResourceUri,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,-1,localResourceDir.lastModified());
String destinationFile="dest_file";
Map localResources=new HashMap();
localResources.put(destinationFile,localResource);
containerLaunchContext.setLocalResources(localResources);
List commands=new ArrayList();
containerLaunchContext.setCommands(commands);
NodeId nodeId=nm.getNMContext().getNodeId();
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,TestContainerManager.createContainerToken(cId,0,nodeId,destinationFile,nm.getNMContext().getContainerTokenSecretManager()));
List list=new ArrayList();
list.add(scRequest);
final StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
// Start the container as the app-attempt user carrying a valid NM token.
final UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId().toString());
NMTokenIdentifier nmIdentifier=new NMTokenIdentifier(cId.getApplicationAttemptId(),nodeId,user,123);
currentUser.addTokenIdentifier(nmIdentifier);
currentUser.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws YarnException, IOException {
nm.getContainerManager().startContainers(allRequests);
return null;
}
}
);
List containerIds=new ArrayList();
containerIds.add(cId);
GetContainerStatusesRequest request=GetContainerStatusesRequest.newInstance(containerIds);
Container container=nm.getNMContext().getContainers().get(request.getContainerIds().get(0));
// Poll (up to 20 * 500ms) until the container reaches DONE.
final int MAX_TRIES=20;
int numTries=0;
while (!container.getContainerState().equals(ContainerState.DONE) && numTries <= MAX_TRIES) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ex) {
}
numTries++;
}
Assert.assertEquals(ContainerState.DONE,container.getContainerState());
// The completed launch must have populated usercache and nm_private.
Assert.assertTrue("The container should create a subDir named currentUser: " + user + "under localDir/usercache",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0);
Assert.assertTrue("There should be files or Dirs under nm_private when " + "container is launched",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0);
// Simulated reboot: stop and start a fresh NodeManager instance.
nm.stop();
nm=new MyNodeManager();
nm.start();
// Poll until startup cleanup has emptied usercache, filecache and nm_private.
numTries=0;
while ((numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0) && numTries < MAX_TRIES) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ex) {
}
numTries++;
}
Assert.assertTrue("After NM reboots, all local files should be deleted",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) == 0);
// Cleanup must be routed through DeletionService: renamed *_DEL_ dirs are
// deleted and per-user/file deletion tasks are scheduled exactly once each.
verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_")));
verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(user,null,new String[]{destinationFile})));
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(null,ContainerLocalizer.USERCACHE + "_DEL_",new String[]{})));
}
BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testNMSentContainerStatusOnResync() throws Exception {
final ContainerStatus testCompleteContainer=TestNodeStatusUpdater.createContainerStatus(2,ContainerState.COMPLETE);
final Container container=TestNodeStatusUpdater.getMockContainer(testCompleteContainer);
NMContainerStatus report=createNMContainerStatus(2,ContainerState.COMPLETE);
when(container.getNMContainerStatus()).thenReturn(report);
NodeManager nm=new NodeManager(){
int registerCount=0;
@Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
return new TestNodeStatusUpdaterResync(context,dispatcher,healthChecker,metrics){
@Override protected ResourceTracker createResourceTracker(){
return new MockResourceTracker(){
@Override public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnException, IOException {
if (registerCount == 0) {
try {
Assert.assertEquals(0,request.getNMContainerStatuses().size());
}
catch ( AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
getNMContext().getContainers().put(testCompleteContainer.getContainerId(),container);
}
else {
List statuses=request.getNMContainerStatuses();
try {
Assert.assertEquals(1,statuses.size());
Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId());
}
catch ( AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
}
registerCount++;
return super.registerNodeManager(request);
}
@Override public NodeHeartbeatResponse nodeHeartbeat( NodeHeartbeatRequest request){
List statuses=request.getNodeStatus().getContainersStatuses();
try {
Assert.assertEquals(1,statuses.size());
Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId());
}
catch ( AssertionError error) {
error.printStackTrace();
assertionFailedInThread.set(true);
}
return YarnServerBuilderUtils.newNodeHeartbeatResponse(1,NodeAction.RESYNC,null,null,null,null,1000L);
}
}
;
}
}
;
}
}
;
YarnConfiguration conf=createNMConfig();
nm.init(conf);
nm.start();
try {
syncBarrier.await();
}
catch ( BrokenBarrierException e) {
}
Assert.assertFalse(assertionFailedInThread.get());
nm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that when re-registration during RESYNC throws (TestNodeManager3's
// behavior), the NodeManager shuts itself down; waits on the shared
// isNMShutdownCalled flag rather than polling.
@SuppressWarnings("unchecked") @Test(timeout=10000) public void testNMshutdownWhenResyncThrowException() throws IOException, InterruptedException, YarnException {
NodeManager nm=new TestNodeManager3();
YarnConfiguration conf=createNMConfig();
nm.init(conf);
nm.start();
// Exactly one successful registration before the RESYNC is injected.
Assert.assertEquals(1,((TestNodeManager3)nm).getNMRegistrationCount());
nm.getNMDispatcher().getEventHandler().handle(new NodeManagerEvent(NodeManagerEventType.RESYNC));
// Block until the shutdown hook notifies on the flag object.
synchronized (isNMShutdownCalled) {
while (isNMShutdownCalled.get() == false) {
try {
isNMShutdownCalled.wait();
}
catch ( InterruptedException e) {
}
}
}
Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get());
nm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies NM decommission: a SHUTDOWN NodeAction from the RM heartbeat must
// mark the context decommissioned and drive the service to STOPPED.
// (Method name keeps the historical "Decommision" spelling.)
@Test public void testNodeDecommision() throws Exception {
nm=getNodeManager(NodeAction.SHUTDOWN);
YarnConfiguration conf=createNMConfig();
nm.init(conf);
Assert.assertEquals(STATE.INITED,nm.getServiceState());
nm.start();
// Wait (up to ~100s) for at least one heartbeat to be processed.
int waitCount=0;
while (heartBeatID < 1 && waitCount++ != 200) {
Thread.sleep(500);
}
Assert.assertFalse(heartBeatID < 1);
Assert.assertTrue(nm.getNMContext().getDecommissioned());
// Then wait (up to ~20s) for the NM to finish stopping itself.
waitCount=0;
while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
LOG.info("Waiting for NM to stop..");
Thread.sleep(1000);
}
Assert.assertEquals(STATE.STOPPED,nm.getServiceState());
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// Verifies the NM's RM-connection retry policy: with no RM, startup must keep
// retrying for at least RESOURCEMANAGER_CONNECT_MAX_WAIT_MS before failing;
// with an RM that comes up after rmStartIntervalMS, startup must succeed
// within that interval plus a generous delta.
@Test(timeout=150000) public void testNMConnectionToRM() throws Exception {
// Tolerance added on top of each expected wait to absorb scheduling jitter.
final long delta=50000;
final long connectionWaitMs=5000;
final long connectionRetryIntervalMs=1000;
final long rmStartIntervalMS=2 * 1000;
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,connectionWaitMs);
conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,connectionRetryIntervalMs);
NodeManagerWithCustomNodeStatusUpdater nmWithUpdater;
// Phase 1: updater configured so the RM never becomes reachable (last arg true
// presumably means "fail to serve" — confirm against MyNodeStatusUpdater4).
nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){
@Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,true);
return nodeStatusUpdater;
}
}
;
nm.init(conf);
long waitStartTime=System.currentTimeMillis();
try {
nm.start();
Assert.fail("NM should have failed to start due to RM connect failure");
}
catch ( Exception e) {
// Startup must have spent at least connectionWaitMs retrying, but not
// unreasonably longer than that.
long t=System.currentTimeMillis();
long duration=t - waitStartTime;
boolean waitTimeValid=(duration >= connectionWaitMs) && (duration < (connectionWaitMs + delta));
if (!waitTimeValid) {
throw new Exception("NM should have tried re-connecting to RM during " + "period of at least " + connectionWaitMs + " ms, but "+ "stopped retrying within "+ (connectionWaitMs + delta)+ " ms: "+ e,e);
}
}
// Phase 2: updater simulates the RM becoming available after rmStartIntervalMS.
nm=nmWithUpdater=new NodeManagerWithCustomNodeStatusUpdater(){
@Override protected NodeStatusUpdater createUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
NodeStatusUpdater nodeStatusUpdater=new MyNodeStatusUpdater4(context,dispatcher,healthChecker,metrics,rmStartIntervalMS,false);
return nodeStatusUpdater;
}
}
;
nm.init(conf);
NodeStatusUpdater updater=nmWithUpdater.getUpdater();
Assert.assertNotNull("Updater not yet created ",updater);
waitStartTime=System.currentTimeMillis();
try {
nm.start();
}
catch ( Exception ex) {
LOG.error("NM should have started successfully " + "after connecting to RM.",ex);
throw ex;
}
long duration=System.currentTimeMillis() - waitStartTime;
MyNodeStatusUpdater4 myUpdater=(MyNodeStatusUpdater4)updater;
Assert.assertTrue("NM started before updater triggered",myUpdater.isTriggered());
// Startup time must bracket the simulated RM start interval.
Assert.assertTrue("NM should have connected to RM after " + "the start interval of " + rmStartIntervalMS + ": actual "+ duration+ " "+ myUpdater,(duration >= rmStartIntervalMS));
Assert.assertTrue("NM should have connected to RM less than " + (rmStartIntervalMS + delta) + " milliseconds of RM starting up: actual "+ duration+ " "+ myUpdater,(duration < (rmStartIntervalMS + delta)));
}
IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Registers an NM against the mock RM (MyNodeStatusUpdater) and verifies:
// the status updater is the last service added, the NM starts on a background
// thread, several heartbeats flow, and exactly one node ends up registered.
@Test public void testNMRegistration() throws InterruptedException {
nm=new NodeManager(){
@Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
return new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
}
}
;
YarnConfiguration conf=createNMConfig();
nm.init(conf);
// The NodeStatusUpdater must be the last service so everything else is up
// before the NM registers with the RM.
Object[] services=nm.getServices().toArray();
Object lastService=services[services.length - 1];
Assert.assertTrue("last service is NOT the node status updater",lastService instanceof NodeStatusUpdater);
// Start the NM off-thread; failures surface via the nmStartError field.
new Thread(){
public void run(){
try {
nm.start();
}
catch ( Throwable e) {
TestNodeStatusUpdater.this.nmStartError=e;
throw new YarnRuntimeException(e);
}
}
}
.start();
System.out.println(" ----- thread already started.." + nm.getServiceState());
// Poll (up to ~100s) until the NM leaves INITED, bailing out on startup errors.
int waitCount=0;
while (nm.getServiceState() == STATE.INITED && waitCount++ != 50) {
LOG.info("Waiting for NM to start..");
if (nmStartError != null) {
LOG.error("Error during startup. ",nmStartError);
// NOTE(review): getCause() may be null when nmStartError has no cause,
// which would turn a startup failure into an NPE here -- confirm.
Assert.fail(nmStartError.getCause().getMessage());
}
Thread.sleep(2000);
}
if (nm.getServiceState() != STATE.STARTED) {
Assert.fail("NodeManager failed to start");
}
// Allow at least four heartbeats (IDs 0..3) to complete.
waitCount=0;
while (heartBeatID <= 3 && waitCount++ != 200) {
Thread.sleep(1000);
}
Assert.assertFalse(heartBeatID <= 3);
Assert.assertEquals("Number of registered NMs is wrong!!",1,this.registeredNodes.size());
nm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies the NM still reaches STARTED when the RM reports a version below
// the configured NM_RESOURCEMANAGER_MINIMUM_VERSION while sending NORMAL
// heartbeat actions, i.e. the version check alone does not abort startup.
@Test public void testRMVersionLessThanMinimum() throws InterruptedException {
  final AtomicInteger numCleanups=new AtomicInteger(0);
  YarnConfiguration conf=createNMConfig();
  conf.set(YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION,"3.0.0");
  nm=new NodeManager(){
    @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
      // Mock tracker: RM advertises version 3.0.0 and answers heartbeats normally.
      MyNodeStatusUpdater myNodeStatusUpdater=new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
      MyResourceTracker2 myResourceTracker2=new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction=NodeAction.NORMAL;
      myResourceTracker2.rmVersion="3.0.0";
      myNodeStatusUpdater.resourceTracker=myResourceTracker2;
      return myNodeStatusUpdater;
    }
    @Override protected ContainerManagerImpl createContainerManager( Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler){
      return new ContainerManagerImpl(context,exec,del,nodeStatusUpdater,metrics,aclsManager,dirsHandler){
        // Counts shutdown cleanups; not asserted in this test, kept for
        // parity with the sibling NodeManager overrides in this class.
        @Override public void cleanUpApplicationsOnNMShutDown(){
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  nm.init(conf);
  nm.start();
  // Poll (up to ~20s) for the NM to reach STARTED.
  int waitCount=0;
  while (nm.getServiceState() != STATE.STARTED && waitCount++ != 20) {
    LOG.info("Waiting for NM to start..");  // fixed copy-paste: said "to stop" while waiting for start
    Thread.sleep(1000);
  }
  // assertEquals reports the actual state on failure (was assertTrue on ==).
  Assert.assertEquals(STATE.STARTED,nm.getServiceState());
  nm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Verifies the "recently stopped containers" cache in NodeStatusUpdaterImpl:
// an added container id is tracked, and removeVeryOldStoppedContainersFromCache()
// evicts it only after the configured retention window (10s here) elapses.
@Test(timeout=90000) public void testRecentlyFinishedContainers() throws Exception {
NodeManager nm=new NodeManager();
// NOTE(review): this local NM is initialized but never stopped -- consider
// stopping it so test resources are released.
YarnConfiguration conf=new YarnConfiguration();
// Track stopped containers for 10 seconds.
conf.set(NodeStatusUpdaterImpl.YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,"10000");
nm.init(conf);
NodeStatusUpdaterImpl nodeStatusUpdater=(NodeStatusUpdaterImpl)nm.getNodeStatusUpdater();
ApplicationId appId=ApplicationId.newInstance(0,0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0);
ContainerId cId=ContainerId.newInstance(appAttemptId,0);
nodeStatusUpdater.addCompletedContainer(cId);
Assert.assertTrue(nodeStatusUpdater.isContainerRecentlyStopped(cId));
long time1=System.currentTimeMillis();
// Poll eviction for up to ~15s (retention is 10s, so it must expire in-window).
int waitInterval=15;
while (waitInterval-- > 0 && nodeStatusUpdater.isContainerRecentlyStopped(cId)) {
nodeStatusUpdater.removeVeryOldStoppedContainersFromCache();
Thread.sleep(1000);
}
long time2=System.currentTimeMillis();
Assert.assertFalse(nodeStatusUpdater.isContainerRecentlyStopped(cId));
// NOTE(review): the upper bound 250000 ms looks like a typo for 25000 -- the
// 15x1s poll loop above can never run anywhere near 250s; confirm intent.
Assert.assertTrue((time2 - time1) >= 10000 && (time2 - time1) <= 250000);
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// With log aggregation enabled, the NM must piggyback keep-alive requests for
// finished applications on its heartbeats, and must stop accumulating them
// once the application is removed from the NM context.
@Test public void testApplicationKeepAlive() throws Exception {
MyNodeManager nm=new MyNodeManager();
try {
YarnConfiguration conf=createNMConfig();
conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true);
// Short expiry interval so keep-alives are emitted within the test window.
conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,4000l);
nm.init(conf);
nm.start();
// Let ~12 heartbeats accumulate.
while (heartBeatID < 12) {
Thread.sleep(1000l);
}
MyResourceTracker3 rt=(MyResourceTracker3)nm.getNodeStatusUpdater().getRMClient();
// Drop the app from the context; no further keep-alives should be recorded.
rt.context.getApplications().remove(rt.appId);
Assert.assertEquals(1,rt.keepAliveRequests.size());
int numKeepAliveRequests=rt.keepAliveRequests.get(rt.appId).size();
LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]");
// ~12 heartbeats at ~1s with a 4s expiry interval should yield 2-3 keep-alives.
Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3);
// Eight more heartbeats: the count must not grow after removal.
while (heartBeatID < 20) {
Thread.sleep(1000l);
}
int numKeepAliveRequests2=rt.keepAliveRequests.get(rt.appId).size();
Assert.assertEquals(numKeepAliveRequests,numKeepAliveRequests2);
}
finally {
if (nm.getServiceState() == STATE.STARTED) nm.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// An RM SHUTDOWN heartbeat action stops the NM; calling nm.stop() on top of
// that must be reentrant -- the shutdown cleanup must run exactly once.
@Test public void testStopReentrant() throws Exception {
  final AtomicInteger numCleanups=new AtomicInteger(0);
  nm=new NodeManager(){
    @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){
      // Mock tracker orders the NM to shut down on its first heartbeat.
      MyNodeStatusUpdater myNodeStatusUpdater=new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics);
      MyResourceTracker2 myResourceTracker2=new MyResourceTracker2();
      myResourceTracker2.heartBeatNodeAction=NodeAction.SHUTDOWN;
      myNodeStatusUpdater.resourceTracker=myResourceTracker2;
      return myNodeStatusUpdater;
    }
    @Override protected ContainerManagerImpl createContainerManager( Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler){
      return new ContainerManagerImpl(context,exec,del,nodeStatusUpdater,metrics,aclsManager,dirsHandler){
        // Count every shutdown cleanup so reentrancy can be asserted below.
        @Override public void cleanUpApplicationsOnNMShutDown(){
          super.cleanUpApplicationsOnNMShutDown();
          numCleanups.incrementAndGet();
        }
      };
    }
  };
  YarnConfiguration conf=createNMConfig();
  nm.init(conf);
  nm.start();
  // Wait (up to ~100s) for the first heartbeat, which triggers SHUTDOWN.
  int waitCount=0;
  while (heartBeatID < 1 && waitCount++ != 200) {
    Thread.sleep(500);
  }
  Assert.assertFalse(heartBeatID < 1);
  // Explicit stop on top of the heartbeat-initiated shutdown.
  nm.stop();
  waitCount=0;
  while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) {
    LOG.info("Waiting for NM to stop..");
    Thread.sleep(1000);
  }
  Assert.assertEquals(STATE.STOPPED,nm.getServiceState());
  // Cleanup must have run exactly once despite the double stop.
  // (fixed reversed assertEquals arguments: expected value comes first)
  Assert.assertEquals(1,numCleanups.get());
}
InternalCallVerifier BooleanVerifier
// Verifies that stopping the NM while the status updater is still retrying
// the RM connection cleans up running containers and shuts down after the
// expected number of heartbeats.
@Test(timeout=200000) public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception {
  // Values are milliseconds (set into *_MS keys); the previous local name
  // "connectionWaitSecs" was misleading.
  final long connectionWaitMs=1000;
  final long connectionRetryIntervalMs=1000;
  YarnConfiguration conf=createNMConfig();
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,connectionWaitMs);
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,connectionRetryIntervalMs);
  conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS,5000);
  conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS,1);
  CyclicBarrier syncBarrier=new CyclicBarrier(2);
  nm=new MyNodeManager2(syncBarrier,conf);
  nm.init(conf);
  nm.start();
  // Launch a container so NM shutdown has something to clean up.
  ContainerId cId=TestNodeManagerShutdown.createContainerId();
  FileContext localFS=FileContext.getLocalFSFileContext();
  TestNodeManagerShutdown.startContainer(nm,cId,localFS,nmLocalDir,new File("start_file.txt"));
  try {
    syncBarrier.await(10000,TimeUnit.MILLISECONDS);
  }
  catch ( Exception ignored) {
    // Best-effort rendezvous with MyNodeManager2's stopping thread; a timeout
    // or broken barrier is tolerated -- the assertions below still apply.
  }
  Assert.assertFalse("Containers not cleaned up when NM stopped",assertionFailedInThread.get());
  Assert.assertTrue(((MyNodeManager2)nm).isStopped);
  // assertEquals reports the actual heartbeat count on failure.
  Assert.assertEquals("calculate heartBeatCount based on" + " connection wait and retry intervals",2,heartBeatID);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// The PB record factory must return the protobuf implementation class for
// LocalizerHeartbeatResponse.
@Test public void testPbRecordFactory(){
  RecordFactory pbRecordFactory=RecordFactoryPBImpl.get();
  try {
    LocalizerHeartbeatResponse response=pbRecordFactory.newRecordInstance(LocalizerHeartbeatResponse.class);
    Assert.assertEquals(LocalizerHeartbeatResponsePBImpl.class,response.getClass());
  }
  catch ( YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");  // fixed typo: "crete" -> "create"
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Starts a LocalizerService on 0.0.0.0:8040, sends one heartbeat over RPC,
// and expects the canned "die" response; the server is stopped in all cases.
@Test public void testLocalizerRPC() throws Exception {
  InetSocketAddress locAddr=new InetSocketAddress("0.0.0.0",8040);
  LocalizerService server=new LocalizerService(locAddr);
  try {
    server.start();
    Configuration conf=new Configuration();
    YarnRPC rpc=YarnRPC.create(conf);
    LocalizationProtocol client=(LocalizationProtocol)rpc.getProxy(LocalizationProtocol.class,locAddr,conf);
    LocalizerStatus status=recordFactory.newRecordInstance(LocalizerStatus.class);
    status.setLocalizerId("localizer0");
    LocalizerHeartbeatResponse response=client.heartbeat(status);
    assertEquals(dieHBResponse(),response);
  }
  finally {
    server.stop();
  }
  // removed a trailing no-op assertTrue(true) that verified nothing
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Serializes a LocalizerHeartbeatResponse to its delimited protobuf form,
// parses it back, and verifies the round-tripped record equals the original.
@Test(timeout=10000) public void testLocalizerHeartbeatResponseSerDe() throws Exception {
  LocalizerHeartbeatResponse original=createLocalizerHeartbeatResponse();
  assertTrue(original instanceof LocalizerHeartbeatResponsePBImpl);
  // Write the proto in delimited form into a buffer ...
  DataOutputBuffer sink=new DataOutputBuffer();
  ((LocalizerHeartbeatResponsePBImpl)original).getProto().writeDelimitedTo(sink);
  // ... and parse it back out of that buffer.
  DataInputBuffer source=new DataInputBuffer();
  source.reset(sink.getData(),0,sink.getLength());
  LocalizerHeartbeatResponseProto parsed=LocalizerHeartbeatResponseProto.parseDelimitedFrom(source);
  assertNotNull(parsed);
  LocalizerHeartbeatResponse roundTripped=new LocalizerHeartbeatResponsePBImpl(parsed);
  assertEquals(original,roundTripped);
  // Spot-check the first resource spec on both copies.
  assertEquals(createResource(),original.getResourceSpecs().get(0).getResource());
  assertEquals(createResource(),roundTripped.getResourceSpecs().get(0).getResource());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// SerializedException must round-trip exception type, message, and the full
// cause chain (YarnException -> RuntimeException -> IOException).
@Test(timeout=10000) public void testSerializedExceptionDeSer() throws Exception {
  // Simple case: single exception, no cause.
  YarnException yarnEx=new YarnException("Yarn_Exception");
  SerializedException serEx=SerializedException.newInstance(yarnEx);
  Throwable throwable=serEx.deSerialize();
  Assert.assertEquals(yarnEx.getClass(),throwable.getClass());
  Assert.assertEquals(yarnEx.getMessage(),throwable.getMessage());
  // Nested case: three-deep cause chain must survive the round trip.
  IOException ioe=new IOException("Test_IOException");
  RuntimeException runtimeException=new RuntimeException("Test_RuntimeException",ioe);
  YarnException yarnEx2=new YarnException("Test_YarnException",runtimeException);
  SerializedException serEx2=SerializedException.newInstance(yarnEx2);
  Throwable throwable2=serEx2.deSerialize();
  // removed leftover debug printStackTrace() -- it only polluted test output
  Assert.assertEquals(yarnEx2.getClass(),throwable2.getClass());
  Assert.assertEquals(yarnEx2.getMessage(),throwable2.getMessage());
  Assert.assertEquals(runtimeException.getClass(),throwable2.getCause().getClass());
  Assert.assertEquals(runtimeException.getMessage(),throwable2.getCause().getMessage());
  Assert.assertEquals(ioe.getClass(),throwable2.getCause().getCause().getClass());
  Assert.assertEquals(ioe.getMessage(),throwable2.getCause().getCause().getMessage());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Serializes a LocalizerStatus to its delimited protobuf form, parses it
// back, and verifies the round-tripped record equals the original.
@Test(timeout=10000) public void testLocalizerStatusSerDe() throws Exception {
  LocalizerStatus original=createLocalizerStatus();
  assertTrue(original instanceof LocalizerStatusPBImpl);
  // Write the proto in delimited form into a buffer ...
  DataOutputBuffer sink=new DataOutputBuffer();
  ((LocalizerStatusPBImpl)original).getProto().writeDelimitedTo(sink);
  // ... and parse it back out of that buffer.
  DataInputBuffer source=new DataInputBuffer();
  source.reset(sink.getData(),0,sink.getLength());
  LocalizerStatusProto parsed=LocalizerStatusProto.parseDelimitedFrom(source);
  assertNotNull(parsed);
  LocalizerStatus roundTripped=new LocalizerStatusPBImpl(parsed);
  assertEquals(original,roundTripped);
  // Spot-check key fields on both copies.
  assertEquals("localizer0",original.getLocalizerId());
  assertEquals("localizer0",roundTripped.getLocalizerId());
  assertEquals(createLocalResourceStatus(),original.getResourceStatus(0));
  assertEquals(createLocalResourceStatus(),roundTripped.getResourceStatus(0));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Serializes a LocalResourceStatus to its delimited protobuf form, parses it
// back, and verifies the round-tripped record equals the original.
@Test(timeout=10000) public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus original=createLocalResourceStatus();
  assertTrue(original instanceof LocalResourceStatusPBImpl);
  // Write the proto in delimited form into a buffer ...
  DataOutputBuffer sink=new DataOutputBuffer();
  ((LocalResourceStatusPBImpl)original).getProto().writeDelimitedTo(sink);
  // ... and parse it back out of that buffer.
  DataInputBuffer source=new DataInputBuffer();
  source.reset(sink.getData(),0,sink.getLength());
  LocalResourceStatusProto parsed=LocalResourceStatusProto.parseDelimitedFrom(source);
  assertNotNull(parsed);
  LocalResourceStatus roundTripped=new LocalResourceStatusPBImpl(parsed);
  assertEquals(original,roundTripped);
  // Both copies must carry the expected resource.
  assertEquals(createResource(),original.getResource());
  assertEquals(createResource(),roundTripped.getResource());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Configures two aux services, starts them, then stops one of them directly:
// the parent AuxServices must transition to STOPPED and drop all children.
@Test public void testAuxUnexpectedStop(){
  Configuration config=new Configuration();
  config.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
  config.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
  config.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
  final AuxServices auxServices=new AuxServices();
  auxServices.init(config);
  auxServices.start();
  // Stop one child service out from under the aggregator.
  Service firstService=auxServices.getServices().iterator().next();
  firstService.stop();
  assertEquals("Auxiliary service stopped, but AuxService unaffected.",STOPPED,auxServices.getServiceState());
  assertTrue(auxServices.getServices().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Drives AuxServices through application and container lifecycle events and
// verifies every LightService observed the expected stop/init data.
@Test public void testAuxEventDispatch(){
  Configuration conf=new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
  conf.setInt("A.expected.init",1);
  conf.setInt("B.expected.stop",1);
  final AuxServices aux=new AuxServices();
  aux.init(conf);
  aux.start();
  // APPLICATION_INIT for app 65, payload addressed to "Asrv".
  ApplicationId appId1=ApplicationId.newInstance(0,65);
  ByteBuffer buf=ByteBuffer.allocate(6);
  buf.putChar('A');
  buf.putInt(65);
  buf.flip();
  AuxServicesEvent event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT,"user0",appId1,"Asrv",buf);
  aux.handle(event);
  // APPLICATION_STOP for app 66.
  ApplicationId appId2=ApplicationId.newInstance(0,66);
  event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,"user0",appId2,"Bsrv",null);
  aux.handle(event);
  // Restored generic types: the raw declarations here would not compile
  // against the typed element accesses below.
  Collection<AuxiliaryService> servs=aux.getServices();
  for ( AuxiliaryService serv : servs) {
    ArrayList<Integer> appIds=((LightService)serv).getAppIdsStopped();
    assertEquals("app not properly stopped",1,appIds.size());
    assertTrue("wrong app stopped",appIds.contains((Integer)66));
  }
  // No container events have been dispatched yet.
  for ( AuxiliaryService serv : servs) {
    assertNull(((LightService)serv).containerId);
    assertNull(((LightService)serv).resource);
  }
  ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId1,1);
  ContainerTokenIdentifier cti=new ContainerTokenIdentifier(ContainerId.newInstance(attemptId,1),"","",Resource.newInstance(1,1),0,0,0,Priority.newInstance(0),0);
  Container container=new ContainerImpl(null,null,null,null,null,null,cti);
  ContainerId containerId=container.getContainerId();
  Resource resource=container.getResource();
  // CONTAINER_INIT must propagate the container id and resource to every service.
  event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container);
  aux.handle(event);
  for ( AuxiliaryService serv : servs) {
    assertEquals(containerId,((LightService)serv).containerId);
    assertEquals(resource,((LightService)serv).resource);
    // Reset so the CONTAINER_STOP check below proves a fresh delivery.
    ((LightService)serv).containerId=null;
    ((LightService)serv).resource=null;
  }
  // CONTAINER_STOP must deliver the same data again.
  event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP,container);
  aux.handle(event);
  for ( AuxiliaryService serv : servs) {
    assertEquals(containerId,((LightService)serv).containerId);
    assertEquals(resource,((LightService)serv).resource);
  }
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies AuxServices instantiates exactly one ServiceA and one ServiceB and
// walks them through the INITED -> STARTED -> STOPPED lifecycle.
@Test public void testAuxServices(){
  Configuration config=new Configuration();
  config.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
  config.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
  config.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
  final AuxServices auxServices=new AuxServices();
  auxServices.init(config);
  // Prime-factor encoding of the mix: ServiceA contributes x2, ServiceB x3,
  // so exactly one of each yields 6.
  int mix=1;
  for ( Service service : auxServices.getServices()) {
    assertEquals(INITED,service.getServiceState());
    if (service instanceof ServiceA) {
      mix*=2;
    } else if (service instanceof ServiceB) {
      mix*=3;
    } else {
      fail("Unexpected service type " + service.getClass());
    }
  }
  assertEquals("Invalid mix of services",6,mix);
  auxServices.start();
  for ( Service service : auxServices.getServices()) {
    assertEquals(STARTED,service.getServiceState());
  }
  auxServices.stop();
  for ( Service service : auxServices.getServices()) {
    assertEquals(STOPPED,service.getServiceState());
  }
}
BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Same service-mix lifecycle check as testAuxServices, plus verifies that
// getMetaData() exposes each service's metadata buffer after start.
@Test public void testAuxServicesMeta(){
  Configuration conf=new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class);
  final AuxServices aux=new AuxServices();
  aux.init(conf);
  // Prime-factor encoding: one ServiceA (x2) and one ServiceB (x3) -> 6.
  int latch=1;
  for ( Service s : aux.getServices()) {
    assertEquals(INITED,s.getServiceState());
    if (s instanceof ServiceA) {
      latch*=2;
    }
    else if (s instanceof ServiceB) {
      latch*=3;
    }
    else fail("Unexpected service type " + s.getClass());
  }
  assertEquals("Invalid mix of services",6,latch);
  aux.start();
  for ( Service s : aux.getServices()) {
    assertEquals(STARTED,s.getServiceState());
  }
  // Restored generic type: a raw Map here would not compile against the
  // ByteBuffer.array() calls below.
  Map<String,ByteBuffer> meta=aux.getMetaData();
  assertEquals(2,meta.size());
  assertEquals("A",new String(meta.get("Asrv").array()));
  assertEquals("B",new String(meta.get("Bsrv").array()));
  aux.stop();
  for ( Service s : aux.getServices()) {
    assertEquals(STOPPED,s.getServiceState());
  }
}
InternalCallVerifier EqualityVerifier
// With NM recovery enabled, AuxServices must create one state-store directory
// per recoverable aux service under STATE_STORE_ROOT_NAME.
@Test public void testAuxServiceRecoverySetup() throws IOException {
  Configuration conf=new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR,TEST_DIR.toString());
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),RecoverableServiceA.class,Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),RecoverableServiceB.class,Service.class);
  try {
    final AuxServices aux=new AuxServices();
    aux.init(conf);
    Assert.assertEquals(2,aux.getServices().size());
    File auxStorageDir=new File(TEST_DIR,AuxServices.STATE_STORE_ROOT_NAME);
    // listFiles() returns null when the directory is missing or unreadable;
    // fail with a clear message instead of an NPE on .length.
    File[] storeDirs=auxStorageDir.listFiles();
    Assert.assertNotNull("aux storage dir not created: " + auxStorageDir,storeDirs);
    Assert.assertEquals(2,storeDirs.length);
    aux.close();
  }
  finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Starts 10 containers: even ids carry an invalid RM identifier and must be
// rejected; odd ids carry the expected identifier and must start.
@Test public void testMultipleContainersLaunch() throws Exception {
  containerManager.start();
  // Restored generic type (declaration had been stripped to a raw List).
  List<StartContainerRequest> list=new ArrayList<StartContainerRequest>();
  ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
  for (int i=0; i < 10; i++) {
    ContainerId cId=createContainerId(i);
    // Even ids -> stale/invalid RM identifier, odd ids -> current RM.
    long identifier=((i & 1) == 0) ? ResourceManagerConstants.RM_INVALID_IDENTIFIER : DUMMY_RM_IDENTIFIER;
    Token containerToken=createContainerToken(cId,identifier,context.getNodeId(),user,context.getContainerTokenSecretManager());
    StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
    list.add(request);
  }
  StartContainersRequest requestList=StartContainersRequest.newInstance(list);
  StartContainersResponse response=containerManager.startContainers(requestList);
  // Exactly the five odd-id containers start ...
  Assert.assertEquals(5,response.getSuccessfullyStartedContainers().size());
  for ( ContainerId id : response.getSuccessfullyStartedContainers()) {
    Assert.assertEquals(1,id.getId() & 1);
  }
  // ... and the five even-id containers are rejected with the stale-RM message.
  Assert.assertEquals(5,response.getFailedRequests().size());
  for ( Map.Entry<ContainerId,SerializedException> entry : response.getFailedRequests().entrySet()) {
    Assert.assertEquals(0,entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains("Container " + entry.getKey() + " rejected as it is allocated by a previous RM"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Starts 10 containers where even ids run as user "Fail" and odd ids as
// "Pass"; getContainerStatuses and stopContainers must each report 5
// successes (odd ids) and 5 failures (even ids, "Reject this container").
// NOTE(review): the rejection of user "Fail" is presumably implemented by
// this test class's mock container manager -- confirm there.
@Test public void testMultipleContainersStopAndGetStatus() throws Exception {
  containerManager.start();
  // Restored generic types (declarations had been stripped to raw List).
  List<StartContainerRequest> startRequest=new ArrayList<StartContainerRequest>();
  ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
  List<ContainerId> containerIds=new ArrayList<ContainerId>();
  for (int i=0; i < 10; i++) {
    ContainerId cId=createContainerId(i);
    String user=((i & 1) == 0) ? "Fail" : "Pass";
    Token containerToken=createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager());
    StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
    startRequest.add(request);
    containerIds.add(cId);
  }
  StartContainersRequest requestList=StartContainersRequest.newInstance(startRequest);
  containerManager.startContainers(requestList);
  // Status query: odd ids succeed, even ids fail.
  GetContainerStatusesRequest statusRequest=GetContainerStatusesRequest.newInstance(containerIds);
  GetContainerStatusesResponse statusResponse=containerManager.getContainerStatuses(statusRequest);
  Assert.assertEquals(5,statusResponse.getContainerStatuses().size());
  for ( ContainerStatus status : statusResponse.getContainerStatuses()) {
    Assert.assertEquals(1,status.getContainerId().getId() & 1);
  }
  Assert.assertEquals(5,statusResponse.getFailedRequests().size());
  for ( Map.Entry<ContainerId,SerializedException> entry : statusResponse.getFailedRequests().entrySet()) {
    Assert.assertEquals(0,entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container"));
  }
  // Stop request: same odd/even split.
  StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds);
  StopContainersResponse stopResponse=containerManager.stopContainers(stopRequest);
  Assert.assertEquals(5,stopResponse.getSuccessfullyStoppedContainers().size());
  for ( ContainerId id : stopResponse.getSuccessfullyStoppedContainers()) {
    Assert.assertEquals(1,id.getId() & 1);
  }
  Assert.assertEquals(5,stopResponse.getFailedRequests().size());
  for ( Map.Entry<ContainerId,SerializedException> entry : stopResponse.getFailedRequests().entrySet()) {
    Assert.assertEquals(0,entry.getKey().getId() & 1);
    Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container"));
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Launches a container that localizes a single file and verifies the full
// on-disk localization layout (usercache / appcache / nm-private dirs) plus
// the localized file's contents.
@Test public void testContainerSetup() throws Exception {
  containerManager.start();
  // Create the source file to be localized.
  File dir=new File(tmpDir,"dir");
  dir.mkdirs();
  File file=new File(dir,"file");
  PrintWriter fileWriter=new PrintWriter(file);
  fileWriter.write("Hello World!");
  fileWriter.close();
  ContainerId cId=createContainerId(0);
  ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(file.getAbsolutePath())));
  LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(file.lastModified());
  String destinationFile="dest_file";
  // Restored generic types (declarations had been stripped to raw Map/List).
  Map<String,LocalResource> localResources=new HashMap<String,LocalResource>();
  localResources.put(destinationFile,rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list=new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE);
  // Expected localization layout.
  ApplicationId appId=cId.getApplicationAttemptId().getApplicationId();
  String appIDStr=ConverterUtils.toString(appId);
  String containerIDStr=ConverterUtils.toString(cId);
  File userCacheDir=new File(localDir,ContainerLocalizer.USERCACHE);
  File userDir=new File(userCacheDir,user);
  File appCache=new File(userDir,ContainerLocalizer.APPCACHE);
  File appDir=new File(appCache,appIDStr);
  File containerDir=new File(appDir,containerIDStr);
  File targetFile=new File(containerDir,destinationFile);
  File sysDir=new File(localDir,ResourceLocalizationService.NM_PRIVATE_DIR);
  File appSysDir=new File(sysDir,appIDStr);
  File containerSysDir=new File(appSysDir,containerIDStr);
  for ( File f : new File[]{localDir,sysDir,userCacheDir,appDir,appSysDir,containerDir,containerSysDir}) {
    Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!",f.exists());
    Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!",f.isDirectory());
  }
  Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!",targetFile.exists());
  // Verify the localized contents; close the reader even when an assertion
  // fails (the original leaked it).
  BufferedReader reader=new BufferedReader(new FileReader(targetFile));
  try {
    Assert.assertEquals("Hello World!",reader.readLine());
    Assert.assertEquals(null,reader.readLine());
  }
  finally {
    reader.close();
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// A container whose launch context references service data for an aux service
// that was never configured must be rejected with a descriptive error.
@Test public void testStartContainerFailureWithUnknownAuxService() throws Exception {
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"existService"});
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"existService"),ServiceA.class,Service.class);
  containerManager.start();
  // Restored generic types (declarations had been stripped to raw List/Map).
  List<StartContainerRequest> startRequest=new ArrayList<StartContainerRequest>();
  ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
  // Reference a service name that was never configured.
  Map<String,ByteBuffer> serviceData=new HashMap<String,ByteBuffer>();
  String serviceName="non_exist_auxService";
  serviceData.put(serviceName,ByteBuffer.wrap(serviceName.getBytes()));
  containerLaunchContext.setServiceData(serviceData);
  ContainerId cId=createContainerId(0);
  String user="start_container_fail";
  Token containerToken=createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager());
  StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
  startRequest.add(request);
  StartContainersRequest requestList=StartContainersRequest.newInstance(startRequest);
  StartContainersResponse response=containerManager.startContainers(requestList);
  // assertEquals reports the actual counts on failure (was assertTrue on ==).
  Assert.assertEquals(1,response.getFailedRequests().size());
  Assert.assertEquals(0,response.getSuccessfullyStartedContainers().size());
  Assert.assertTrue(response.getFailedRequests().containsKey(cId));
  Assert.assertTrue(response.getFailedRequests().get(cId).getMessage().contains("The auxService:" + serviceName + " does not exist"));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// End-to-end localization cleanup: after a container completes, its
// container-scoped dirs are deleted while app-scoped dirs survive; after the
// application-finished event, the app dirs are removed as well.
@Test public void testLocalFilesCleanup() throws InterruptedException, IOException, YarnException {
// Rebuild the container manager with this deletion service.
// NOTE(review): presumably the base test's default deletion service
// suppresses actual file deletion -- confirm against the base class.
delSrvc=new DeletionService(exec);
delSrvc.init(conf);
containerManager=createContainerManager(delSrvc);
containerManager.init(conf);
containerManager.start();
// Source file to be localized.
File dir=new File(tmpDir,"dir");
dir.mkdirs();
File file=new File(dir,"file");
PrintWriter fileWriter=new PrintWriter(file);
fileWriter.write("Hello World!");
fileWriter.close();
ContainerId cId=createContainerId(0);
ApplicationId appId=cId.getApplicationAttemptId().getApplicationId();
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(FileContext.getLocalFSFileContext().makeQualified(new Path(file.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(file.lastModified());
String destinationFile="dest_file";
Map localResources=new HashMap();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager()));
List list=new ArrayList();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE);
BaseContainerManagerTest.waitForApplicationState(containerManager,cId.getApplicationAttemptId().getApplicationId(),ApplicationState.RUNNING);
// Localization layout under test.
String appIDStr=ConverterUtils.toString(appId);
String containerIDStr=ConverterUtils.toString(cId);
File userCacheDir=new File(localDir,ContainerLocalizer.USERCACHE);
File userDir=new File(userCacheDir,user);
File appCache=new File(userDir,ContainerLocalizer.APPCACHE);
File appDir=new File(appCache,appIDStr);
File containerDir=new File(appDir,containerIDStr);
File targetFile=new File(containerDir,destinationFile);
File sysDir=new File(localDir,ResourceLocalizationService.NM_PRIVATE_DIR);
File appSysDir=new File(sysDir,appIDStr);
File containerSysDir=new File(appSysDir,containerIDStr);
// While the app is still RUNNING: app-level dirs exist; container-level dirs
// have already been cleaned up after the container completed.
Assert.assertTrue("AppDir " + appDir.getAbsolutePath() + " doesn't exist!!",appDir.exists());
Assert.assertTrue("AppSysDir " + appSysDir.getAbsolutePath() + " doesn't exist!!",appSysDir.exists());
for ( File f : new File[]{containerDir,containerSysDir}) {
Assert.assertFalse(f.getAbsolutePath() + " exists!!",f.exists());
}
Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",targetFile.exists());
// Finish the application; remaining dirs must be deleted (deletion is
// asynchronous, so poll up to ~15s per path).
containerManager.handle(new CMgrCompletedAppsEvent(Arrays.asList(new ApplicationId[]{appId}),CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
BaseContainerManagerTest.waitForApplicationState(containerManager,cId.getApplicationAttemptId().getApplicationId(),ApplicationState.FINISHED);
for ( File f : new File[]{appDir,containerDir,appSysDir,containerSysDir}) {
int timeout=0;
while (f.exists() && timeout++ < 15) {
Thread.sleep(1000);
}
Assert.assertFalse(f.getAbsolutePath() + " exists!!",f.exists());
}
int timeout=0;
while (targetFile.exists() && timeout++ < 15) {
Thread.sleep(1000);
}
Assert.assertFalse(targetFile.getAbsolutePath() + " exists!!",targetFile.exists());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that NM application state (including application ACLs) survives
 * container-manager restarts via the NM state store:
 * <ol>
 *   <li>app is recovered as INITING with its ACLs intact,</li>
 *   <li>after the RM finishes the app it is recovered as
 *       APPLICATION_RESOURCES_CLEANINGUP, still with ACLs,</li>
 *   <li>once fully finished (resources cleaned + log handling done) the app
 *       is no longer recovered at all.</li>
 * </ol>
 */
@Test public void testApplicationRecovery() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.NM_ADDRESS,"localhost:1234");
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true);
conf.set(YarnConfiguration.YARN_ADMIN_ACL,"yarn_admin_user");
NMStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
Context context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
ContainerManagerImpl cm=createContainerManager(context);
cm.init(conf);
cm.start();
// Install a trivial master key so container/NM tokens can be created below.
MasterKey masterKey=new MasterKeyPBImpl();
masterKey.setKeyId(123);
masterKey.setBytes(ByteBuffer.wrap(new byte[]{(byte)123}));
context.getContainerTokenSecretManager().setMasterKey(masterKey);
context.getNMTokenSecretManager().setMasterKey(masterKey);
String appUser="app_user1";
String modUser="modify_user1";
String viewUser="view_user1";
String enemyUser="enemy_user";
ApplicationId appId=ApplicationId.newInstance(0,1);
ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cid=ContainerId.newInstance(attemptId,1);
// Minimal launch context: empty resources/env/commands, empty credentials.
Map localResources=Collections.emptyMap();
Map<String,String> containerEnv=Collections.emptyMap();
List<String> containerCmds=Collections.emptyList();
Map<String,ByteBuffer> serviceData=Collections.emptyMap();
Credentials containerCreds=new Credentials();
DataOutputBuffer dob=new DataOutputBuffer();
containerCreds.writeTokenStorageToStream(dob);
ByteBuffer containerTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
// ACLs: modUser may modify, viewUser may view; enemyUser gets neither.
Map<ApplicationAccessType,String> acls=new HashMap<ApplicationAccessType,String>();
acls.put(ApplicationAccessType.MODIFY_APP,modUser);
acls.put(ApplicationAccessType.VIEW_APP,viewUser);
ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,containerEnv,containerCmds,serviceData,containerTokens,acls);
StartContainersResponse startResponse=startContainer(context,cm,cid,clc);
assertTrue(startResponse.getFailedRequests().isEmpty());
assertEquals(1,context.getApplications().size());
Application app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.INITING);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Restart #1: app should be recovered as INITING with ACLs intact.
cm.stop();
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertEquals(1,context.getApplications().size());
app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.INITING);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// RM declares the app finished; it should move into resource cleanup.
List<ApplicationId> finishedApps=new ArrayList<ApplicationId>();
finishedApps.add(appId);
cm.handle(new CMgrCompletedAppsEvent(finishedApps,CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER));
waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
// Restart #2: app should be recovered in the cleanup state, ACLs intact.
cm.stop();
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertEquals(1,context.getApplications().size());
app=context.getApplications().get(appId);
assertNotNull(app);
waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP);
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId));
assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId));
assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId));
// Complete the app lifecycle: resources cleaned up, then logs handled.
app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP));
assertEquals(ApplicationState.FINISHED,app.getApplicationState());
app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
// Restart #3: fully-finished app must not be recovered.
cm.stop();
context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore);
cm=createContainerManager(context);
cm.init(conf);
cm.start();
assertTrue(context.getApplications().isEmpty());
cm.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * All container start events before application running.
 */
@Test public void testApplicationInit1(){
  // Three containers ask to init while the app is still INITING; their
  // INIT events must only be forwarded once the app transitions to RUNNING.
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(1,314159265358979L,"yak",3);
    wrapped.initApplication();
    wrapped.initContainer(1);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    assertEquals(1,wrapped.app.getContainers().size());
    wrapped.initContainer(0);
    wrapped.initContainer(2);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    assertEquals(3,wrapped.app.getContainers().size());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    // Every tracked container must have received a ContainerInit event.
    for (int idx=0; idx < wrapped.containers.size(); idx++) {
      verify(wrapped.containerBus).handle(argThat(new ContainerInitMatcher(wrapped.containers.get(idx).getContainerId())));
    }
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testNMTokenSecretManagerCleanup(){
  // The NMTokenSecretManager must be told an app finished once the app
  // reaches FINISHED after its resources are cleaned up.
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(1,314159265358979L,"yak",1);
    wrapped.initApplication();
    wrapped.initContainer(0);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    assertEquals(1,wrapped.app.getContainers().size());
    // Drive the app to completion: finish event, container done, cleanup.
    wrapped.appFinished();
    wrapped.containerFinished(0);
    wrapped.appResourcesCleanedup();
    assertEquals(ApplicationState.FINISHED,wrapped.app.getApplicationState());
    verify(wrapped.nmTokenSecretMgr).appFinished(eq(wrapped.appId));
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Container start events after Application Running
 */
@Test public void testApplicationInit2(){
  // One container inits before RUNNING, two after; all three must still
  // receive their ContainerInit events.
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(2,314159265358979L,"yak",3);
    wrapped.initApplication();
    wrapped.initContainer(0);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    assertEquals(1,wrapped.app.getContainers().size());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    // The pre-RUNNING container's init is flushed on the transition.
    verify(wrapped.containerBus).handle(argThat(new ContainerInitMatcher(wrapped.containers.get(0).getContainerId())));
    wrapped.initContainer(1);
    wrapped.initContainer(2);
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(3,wrapped.app.getContainers().size());
    // Containers initialized while RUNNING are forwarded immediately.
    for (int idx=1; idx < wrapped.containers.size(); idx++) {
      verify(wrapped.containerBus).handle(argThat(new ContainerInitMatcher(wrapped.containers.get(idx).getContainerId())));
    }
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testAppFinishedOnCompletedContainers(){
  // When the app-finished event arrives after every container already
  // completed, the app goes straight to resource cleanup and FINISHED.
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(5,314159265358979L,"yak",3);
    wrapped.initApplication();
    wrapped.initContainer(-1);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    // Forget earlier localizer interactions so the DESTROY event below
    // can be verified in isolation.
    reset(wrapped.localizerBus);
    wrapped.containerFinished(0);
    wrapped.containerFinished(1);
    wrapped.containerFinished(2);
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(0,wrapped.app.getContainers().size());
    wrapped.appFinished();
    assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wrapped.app.getApplicationState());
    verify(wrapped.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wrapped.app)));
    wrapped.appResourcesCleanedup();
    // Once tokens expire, start-container requests become valid again.
    for ( Container container : wrapped.containers) {
      ContainerTokenIdentifier identifier=wrapped.getContainerTokenIdentifier(container.getContainerId());
      waitForContainerTokenToExpire(identifier);
      Assert.assertTrue(wrapped.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier));
    }
    assertEquals(ApplicationState.FINISHED,wrapped.app.getApplicationState());
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testAppFinishedOnRunningContainers(){
  // When the app-finished event arrives while containers are still
  // running, the app must wait in FINISHING_CONTAINERS_WAIT, kill the
  // remaining containers, and only then clean up resources.
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(4,314159265358979L,"yak",3);
    wrapped.initApplication();
    wrapped.initContainer(-1);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    wrapped.containerFinished(0);
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(2,wrapped.app.getContainers().size());
    wrapped.appFinished();
    assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wrapped.app.getApplicationState());
    assertEquals(2,wrapped.app.getContainers().size());
    // The two still-running containers must each get a kill event.
    for (int idx=1; idx < wrapped.containers.size(); idx++) {
      verify(wrapped.containerBus).handle(argThat(new ContainerKillMatcher(wrapped.containers.get(idx).getContainerId())));
    }
    wrapped.containerFinished(1);
    assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wrapped.app.getApplicationState());
    assertEquals(1,wrapped.app.getContainers().size());
    // Clear earlier localizer traffic before verifying the DESTROY event.
    reset(wrapped.localizerBus);
    wrapped.containerFinished(2);
    assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wrapped.app.getApplicationState());
    assertEquals(0,wrapped.app.getContainers().size());
    verify(wrapped.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wrapped.app)));
    verify(wrapped.auxBus).handle(refEq(new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,wrapped.appId)));
    wrapped.appResourcesCleanedup();
    // Once tokens expire, start-container requests become valid again.
    for ( Container container : wrapped.containers) {
      ContainerTokenIdentifier identifier=wrapped.getContainerTokenIdentifier(container.getContainerId());
      waitForContainerTokenToExpire(identifier);
      Assert.assertTrue(wrapped.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier));
    }
    assertEquals(ApplicationState.FINISHED,wrapped.app.getApplicationState());
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Finished containers properly tracked when 1 of several containers finishes in APP_INITING
 */
@Test public void testContainersCompleteDuringAppInit2(){
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(3,314159265358979L,"yak",3);
    wrapped.initApplication();
    wrapped.initContainer(-1);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    // One container finishes before the app is inited...
    wrapped.containerFinished(0);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    // ...so only the remaining two are still tracked after RUNNING.
    assertEquals(2,wrapped.app.getContainers().size());
    wrapped.containerFinished(1);
    wrapped.containerFinished(2);
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(0,wrapped.app.getContainers().size());
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Finished containers properly tracked when only container finishes in APP_INITING
 */
@Test public void testContainersCompleteDuringAppInit1(){
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(3,314159265358979L,"yak",1);
    wrapped.initApplication();
    wrapped.initContainer(-1);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    // The only container finishes while still INITING; after the app is
    // inited, no containers should remain tracked.
    wrapped.containerFinished(0);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(0,wrapped.app.getContainers().size());
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * App state RUNNING after all containers complete, before RM sends
 * APP_FINISHED
 */
@Test public void testAppRunningAfterContainersComplete(){
  WrappedApplication wrapped=null;
  try {
    wrapped=new WrappedApplication(3,314159265358979L,"yak",3);
    wrapped.initApplication();
    wrapped.initContainer(-1);
    assertEquals(ApplicationState.INITING,wrapped.app.getApplicationState());
    wrapped.applicationInited();
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    wrapped.containerFinished(0);
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(2,wrapped.app.getContainers().size());
    wrapped.containerFinished(1);
    wrapped.containerFinished(2);
    // No APP_FINISHED from the RM yet, so the app must stay RUNNING even
    // though it has no live containers left.
    assertEquals(ApplicationState.RUNNING,wrapped.app.getApplicationState());
    assertEquals(0,wrapped.app.getContainers().size());
  } finally {
    if (wrapped != null) {
      wrapped.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testInitWhileDone() throws Exception {
  // A stray INIT event after the container reached DONE must be ignored.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(6,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    reset(container.localizerBus);
    container.containerSuccessful();
    container.containerResourcesCleanup();
    assertEquals(ContainerState.DONE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    // Re-delivered init: state and (absent) localized resources unchanged.
    container.initContainer();
    assertEquals(ContainerState.DONE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testResourceLocalizedOnLocalizationFailed() throws Exception {
  // After localization fails, late RESOURCE_LOCALIZED events must not
  // move the container out of LOCALIZATION_FAILED.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(16,314159265358979L,4344,"yak");
    container.initContainer();
    // Fail roughly half the resources, but always at least one.
    int failCount=container.getLocalResourceCount() / 2;
    if (failCount == 0) {
      failCount=1;
    }
    container.failLocalizeResources(failCount);
    assertEquals(ContainerState.LOCALIZATION_FAILED,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.localizeResourcesFromInvalidState(failCount);
    assertEquals(ContainerState.LOCALIZATION_FAILED,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
    Assert.assertTrue(container.getDiagnostics().contains(FAKE_LOCALIZATION_ERROR));
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testKillOnLocalizedWhenContainerLaunched() throws Exception {
  // If the launch has already run (and failed) by the time the kill
  // arrives, the container stays in EXITED_WITH_FAILURE.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(17,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    assertEquals(ContainerState.LOCALIZED,container.c.getContainerState());
    // Run the launcher directly so the container exits before the kill.
    ContainerLaunch launcher=container.launcher.running.get(container.c.getContainerId());
    launcher.call();
    container.drainDispatcherEvents();
    assertEquals(ContainerState.EXITED_WITH_FAILURE,container.c.getContainerState());
    container.killContainer();
    assertEquals(ContainerState.EXITED_WITH_FAILURE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testKillOnLocalizedWhenContainerNotLaunched() throws Exception {
  // Kill arrives before the launcher runs: the container moves through
  // KILLING to CONTAINER_CLEANEDUP_AFTER_KILL when the launch completes.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(17,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    assertEquals(ContainerState.LOCALIZED,container.c.getContainerState());
    ContainerLaunch launcher=container.launcher.running.get(container.c.getContainerId());
    container.killContainer();
    assertEquals(ContainerState.KILLING,container.c.getContainerState());
    launcher.call();
    container.drainDispatcherEvents();
    assertEquals(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
    // Final resource-cleanup event: no container should still count as running.
    container.c.handle(new ContainerEvent(container.c.getContainerId(),ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
    assertEquals(0,metrics.getRunningContainers());
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testCleanupOnKillRequest() throws Exception {
  // A kill request on a running container must trigger resource cleanup.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(12,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    reset(container.localizerBus);
    container.killContainer();
    assertEquals(ContainerState.KILLING,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.containerKilledOnRequest();
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Verify correct container request events sent to localizer.
 */
@Test public void testLocalizationRequest() throws Exception {
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(7,314159265358979L,4344,"yak");
    assertEquals(ContainerState.NEW,container.c.getContainerState());
    container.initContainer();
    // The localizer must be asked for all three visibility classes.
    ResourcesRequestedMatcher expectedRequest=new ResourcesRequestedMatcher(container.localResources,EnumSet.of(LocalResourceVisibility.PUBLIC,LocalResourceVisibility.PRIVATE,LocalResourceVisibility.APPLICATION));
    verify(container.localizerBus).handle(argThat(expectedRequest));
    assertEquals(ContainerState.LOCALIZING,container.c.getContainerState());
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * After one resource fails localization, further resource-failed events
 * must leave the container in LOCALIZATION_FAILED.
 */
@Test public void testResourceFailedOnLocalizationFailed() throws Exception {
WrappedContainer wc=null;
try {
wc=new WrappedContainer(16,314159265358979L,4344,"yak");
wc.initContainer();
// Fix: raw Iterator made next() return Object, which cannot be assigned
// to String without a cast — restore the element type.
Iterator<String> lRsrcKeys=wc.localResources.keySet().iterator();
String key1=lRsrcKeys.next();
String key2=lRsrcKeys.next();
wc.failLocalizeSpecificResource(key1);
assertEquals(ContainerState.LOCALIZATION_FAILED,wc.c.getContainerState());
assertNull(wc.c.getLocalizedResources());
// A second failure while already failed must be a no-op state-wise.
wc.failLocalizeSpecificResource(key2);
assertEquals(ContainerState.LOCALIZATION_FAILED,wc.c.getContainerState());
assertNull(wc.c.getLocalizedResources());
verifyCleanupCall(wc);
}
finally {
if (wc != null) {
wc.finished();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testCleanupOnSuccess() throws Exception {
  // Successful container exit must release localized resources and
  // trigger the cleanup call.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(11,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    reset(container.localizerBus);
    container.containerSuccessful();
    assertEquals(ContainerState.EXITED_WITH_SUCCESS,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testKillOnLocalizing() throws Exception {
  // Killing while LOCALIZING moves to KILLING and records the kill in
  // exit status and diagnostics.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(14,314159265358979L,4344,"yak");
    container.initContainer();
    assertEquals(ContainerState.LOCALIZING,container.c.getContainerState());
    container.killContainer();
    assertEquals(ContainerState.KILLING,container.c.getContainerState());
    assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,container.c.cloneAndGetContainerStatus().getExitStatus());
    assertTrue(container.c.cloneAndGetContainerStatus().getDiagnostics().contains("KillRequest"));
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testKillOnLocalizationFailed() throws Exception {
  // A kill after localization already failed must not change the state.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(15,314159265358979L,4344,"yak");
    container.initContainer();
    container.failLocalizeResources(container.getLocalResourceCount());
    assertEquals(ContainerState.LOCALIZATION_FAILED,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.killContainer();
    assertEquals(ContainerState.LOCALIZATION_FAILED,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testLaunchAfterKillRequest() throws Exception {
  // A launch event that races in after a kill request must not revive
  // the container: it stays in KILLING until killed-on-request arrives.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(14,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.killContainer();
    assertEquals(ContainerState.KILLING,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.launchContainer();
    assertEquals(ContainerState.KILLING,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.containerKilledOnRequest();
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A resource-localization failure that arrives while the container is
 * already KILLING must not change the container's state.
 */
@Test public void testResourceFailedOnKilling() throws Exception {
WrappedContainer wc=null;
try {
wc=new WrappedContainer(16,314159265358979L,4344,"yak");
wc.initContainer();
// Fix: raw Iterator made next() return Object, which cannot be assigned
// to String without a cast — restore the element type.
Iterator<String> lRsrcKeys=wc.localResources.keySet().iterator();
String key1=lRsrcKeys.next();
wc.killContainer();
assertEquals(ContainerState.KILLING,wc.c.getContainerState());
assertNull(wc.c.getLocalizedResources());
wc.failLocalizeSpecificResource(key1);
assertEquals(ContainerState.KILLING,wc.c.getContainerState());
assertNull(wc.c.getLocalizedResources());
verifyCleanupCall(wc);
}
finally {
if (wc != null) {
wc.finished();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testCleanupOnFailure() throws Exception {
  // A non-zero container exit must release resources and trigger cleanup.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(10,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    reset(container.localizerBus);
    container.containerFailed(ExitCode.FORCE_KILLED.getExitCode());
    assertEquals(ContainerState.EXITED_WITH_FAILURE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testExternalKill() throws Exception {
  // A kill that did not come through a kill request (external signal)
  // lands the container in EXITED_WITH_FAILURE with resources cleaned.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(13,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    reset(container.localizerBus);
    container.containerKilledOnRequest();
    assertEquals(ContainerState.EXITED_WITH_FAILURE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test @SuppressWarnings("unchecked") public void testLocalizationFailureAtDone() throws Exception {
  // A late resource-failed event on a DONE container must be ignored.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(6,314159265358979L,4344,"yak");
    container.initContainer();
    container.localizeResources();
    container.launchContainer();
    reset(container.localizerBus);
    container.containerSuccessful();
    container.containerResourcesCleanup();
    assertEquals(ContainerState.DONE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    container.resourceFailedContainer();
    assertEquals(ContainerState.DONE,container.c.getContainerState());
    assertNull(container.c.getLocalizedResources());
    verifyCleanupCall(container);
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testKillOnNew() throws Exception {
  // Killing a container that never started goes straight to DONE with
  // the RM-kill exit status and a diagnostic mentioning the request.
  WrappedContainer container=null;
  try {
    container=new WrappedContainer(13,314159265358979L,4344,"yak");
    assertEquals(ContainerState.NEW,container.c.getContainerState());
    container.killContainer();
    assertEquals(ContainerState.DONE,container.c.getContainerState());
    assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,container.c.cloneAndGetContainerStatus().getExitStatus());
    assertTrue(container.c.cloneAndGetContainerStatus().getDiagnostics().contains("KillRequest"));
  } finally {
    if (container != null) {
      container.finished();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
* Verify container launch when all resources already cached.
*/
@Test public void testLocalizationLaunch() throws Exception {
WrappedContainer wc=null;
try {
wc=new WrappedContainer(8,314159265358979L,4344,"yak");
assertEquals(ContainerState.NEW,wc.c.getContainerState());
wc.initContainer();
Map> localPaths=wc.localizeResources();
assertEquals(ContainerState.LOCALIZED,wc.c.getContainerState());
assertNotNull(wc.c.getLocalizedResources());
for ( Entry> loc : wc.c.getLocalizedResources().entrySet()) {
assertEquals(localPaths.remove(loc.getKey()),loc.getValue());
}
assertTrue(localPaths.isEmpty());
final WrappedContainer wcf=wc;
ArgumentMatcher matchesContainerLaunch=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ContainersLauncherEvent launchEvent=(ContainersLauncherEvent)o;
return wcf.c == launchEvent.getContainer();
}
}
;
verify(wc.launcherBus).handle(argThat(matchesContainerLaunch));
}
finally {
if (wc != null) {
wc.finished();
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSpecialCharSymlinks() throws IOException {
File shellFile=null;
File tempFile=null;
String badSymlink=Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" : "foo@zz%_#*&!-+= bar()";
File symLinkFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
tempFile=Shell.appendScriptExtension(tmpDir,"temp");
String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\"";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(timeoutCommand);
writer.close();
Map> resources=new HashMap>();
Path path=new Path(shellFile.getAbsolutePath());
resources.put(path,Arrays.asList(badSymlink));
FileOutputStream fos=new FileOutputStream(tempFile);
Map env=new HashMap();
List commands=new ArrayList();
if (Shell.WINDOWS) {
commands.add("cmd");
commands.add("/c");
commands.add("\"" + badSymlink + "\"");
}
else {
commands.add("/bin/sh ./\\\"" + badSymlink + "\\\"");
}
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile,true);
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir);
shexc.execute();
assertEquals(shexc.getExitCode(),0);
assert (shexc.getOutput().contains("hello"));
symLinkFile=new File(tmpDir,badSymlink);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
if (tempFile != null && tempFile.exists()) {
tempFile.delete();
}
if (symLinkFile != null && symLinkFile.exists()) {
symLinkFile.delete();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=20000) public void testInvalidEnvSyntaxDiagnostics() throws IOException {
File shellFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
Map> resources=new HashMap>();
FileOutputStream fos=new FileOutputStream(shellFile);
FileUtil.setExecutable(shellFile,true);
Map env=new HashMap();
env.put("APPLICATION_WORKFLOW_CONTEXT","{\"workflowId\":\"609f91c5cd83\"," + "\"workflowName\":\"\n\ninsert table " + "\npartition (cd_education_status)\nselect cd_demo_sk, cd_gender, ");
List commands=new ArrayList();
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
Map cmdEnv=new HashMap();
cmdEnv.put("LANG","C");
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir,cmdEnv);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ? "is not recognized as an internal or external command" : "command not found"));
Assert.assertTrue(shexc.getExitCode() != 0);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
@Test(timeout=20000) public void testInvalidSymlinkDiagnostics() throws IOException {
File shellFile=null;
File tempFile=null;
String symLink=Shell.WINDOWS ? "test.cmd" : "test";
File symLinkFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
tempFile=Shell.appendScriptExtension(tmpDir,"temp");
String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\"";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(timeoutCommand);
writer.close();
Map> resources=new HashMap>();
Path invalidPath=new Path(shellFile.getAbsolutePath() + "randomPath");
resources.put(invalidPath,Arrays.asList(symLink));
FileOutputStream fos=new FileOutputStream(tempFile);
Map env=new HashMap();
List commands=new ArrayList();
if (Shell.WINDOWS) {
commands.add("cmd");
commands.add("/c");
commands.add("\"" + symLink + "\"");
}
else {
commands.add("/bin/sh ./\\\"" + symLink + "\\\"");
}
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
FileUtil.setExecutable(tempFile,true);
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertNotNull(diagnostics);
Assert.assertTrue(shexc.getExitCode() != 0);
symLinkFile=new File(tmpDir,symLink);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
if (tempFile != null && tempFile.exists()) {
tempFile.delete();
}
if (symLinkFile != null && symLinkFile.exists()) {
symLinkFile.delete();
}
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=20000) public void testContainerLaunchStdoutAndStderrDiagnostics() throws IOException {
File shellFile=null;
try {
shellFile=Shell.appendScriptExtension(tmpDir,"hello");
String command=Shell.WINDOWS ? "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" : "echo \"hello\"; echo \"error\" 1>&2; exit 2;";
PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile));
FileUtil.setExecutable(shellFile,true);
writer.println(command);
writer.close();
Map> resources=new HashMap>();
FileOutputStream fos=new FileOutputStream(shellFile,true);
Map env=new HashMap();
List commands=new ArrayList();
commands.add(command);
ContainerLaunch.writeLaunchEnv(fos,env,resources,commands);
fos.flush();
fos.close();
Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{shellFile.getAbsolutePath()},tmpDir);
String diagnostics=null;
try {
shexc.execute();
Assert.fail("Should catch exception");
}
catch ( ExitCodeException e) {
diagnostics=e.getMessage();
}
Assert.assertTrue(diagnostics.contains("error"));
Assert.assertTrue(shexc.getOutput().contains("hello"));
Assert.assertTrue(shexc.getExitCode() == 2);
}
finally {
if (shellFile != null && shellFile.exists()) {
shellFile.delete();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * See if environment variables are forwarded using sanitizeEnv: user-set
 * values for NM-controlled variables (CONTAINER_ID, NM_HOST, LOCAL_DIRS,
 * USER, PWD, HOME, ...) must be overridden by the NodeManager. The launched
 * script dumps what it actually sees into a file, which is then verified
 * against the values the NM is expected to inject.
 * @throws Exception
 */
@Test(timeout=60000)
public void testContainerEnvVariables() throws Exception {
  containerManager.start();
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId cId = ContainerId.newInstance(appAttemptId, 0);
  // FIX: restored generic type parameters that had been stripped; the raw
  // collections below would not have compiled against the enhanced-for loops.
  // User-set values for NM-controlled variables; all must be overridden.
  Map<String, String> userSetEnv = new HashMap<String, String>();
  userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
  userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
  userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
  userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
  userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
  userSetEnv.put(Environment.USER.key(), "user_set_" + Environment.USER.key());
  userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME");
  userSetEnv.put(Environment.PWD.name(), "user_set_PWD");
  userSetEnv.put(Environment.HOME.name(), "user_set_HOME");
  containerLaunchContext.setEnvironment(userSetEnv);
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  File processStartFile = new File(tmpDir, "env_vars.txt").getAbsoluteFile();
  if (Shell.WINDOWS) {
    // Batch script: echo each variable (plus aux-service data and the
    // container id) into processStartFile, then keep the process alive.
    fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> "+ processStartFile);
    fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.USER.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.PWD.$() + ">> "+ processStartFile);
    fileWriter.println("@echo " + Environment.HOME.$() + ">> "+ processStartFile);
    for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ "%>> "+ processStartFile);
    }
    fileWriter.println("@echo " + cId + ">> "+ processStartFile);
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    // Shell script equivalent; $$ records the pid, then sleep keeps it alive.
    fileWriter.write("\numask 0");
    fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > "+ processStartFile);
    fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.USER.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.PWD.name() + " >> "+ processStartFile);
    fileWriter.write("\necho $" + Environment.HOME.name() + " >> "+ processStartFile);
    for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
      fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ " >> "+ processStartFile);
    }
    fileWriter.write("\necho $$ >> " + processStartFile);
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  // Localize the script and launch it in a container.
  URL resource_alpha = ConverterUtils.getYarnUrlFromPath(
      localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(
      containerLaunchContext, createContainerToken(cId, Priority.newInstance(0), 0));
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Wait (up to ~20s) for the script to write its environment dump.
  int timeoutSecs = 0;
  while (!processStartFile.exists() && timeoutSecs++ < 20) {
    Thread.sleep(1000);
    LOG.info("Waiting for process start-file to be created");
  }
  Assert.assertTrue("ProcessStartFile doesn't exist!", processStartFile.exists());
  // Compute the expected app-cache and container-log directories.
  List<String> localDirs = dirsHandler.getLocalDirs();
  List<String> logDirs = dirsHandler.getLogDirs();
  List<Path> appDirs = new ArrayList<Path>(localDirs.size());
  for (String localDir : localDirs) {
    Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
    Path userdir = new Path(usersdir, user);
    Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
    appDirs.add(new Path(appsdir, appId.toString()));
  }
  List<String> containerLogDirs = new ArrayList<String>();
  String relativeContainerLogDir =
      ContainerLaunch.getRelativeContainerLogDir(appId.toString(), cId.toString());
  for (String logDir : logDirs) {
    containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir);
  }
  // Verify the environment the script actually observed, line by line, in
  // the same order the script echoed the variables.
  BufferedReader reader = new BufferedReader(new FileReader(processStartFile));
  Assert.assertEquals(cId.toString(), reader.readLine());
  Assert.assertEquals(context.getNodeId().getHost(), reader.readLine());
  Assert.assertEquals(String.valueOf(context.getNodeId().getPort()), reader.readLine());
  Assert.assertEquals(String.valueOf(HTTP_PORT), reader.readLine());
  Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine());
  Assert.assertEquals(user, reader.readLine());
  Assert.assertEquals(user, reader.readLine());
  String obtainedPWD = reader.readLine();
  boolean found = false;
  for (Path localDir : appDirs) {
    if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
  Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,
      YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), reader.readLine());
  for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) {
    Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName),
        ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes())));
  }
  // Verify the sanitized environment stored back on the launch context.
  Assert.assertEquals(cId.toString(),
      containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name()));
  Assert.assertEquals(context.getNodeId().getHost(),
      containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name()));
  Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),
      containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name()));
  Assert.assertEquals(String.valueOf(HTTP_PORT),
      containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name()));
  Assert.assertEquals(StringUtils.join(",", appDirs),
      containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name()));
  Assert.assertEquals(StringUtils.join(",", containerLogDirs),
      containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name()));
  Assert.assertEquals(user,
      containerLaunchContext.getEnvironment().get(Environment.USER.name()));
  Assert.assertEquals(user,
      containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name()));
  found = false;
  obtainedPWD = containerLaunchContext.getEnvironment().get(Environment.PWD.name());
  for (Path localDir : appDirs) {
    if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) {
      found = true;
      break;
    }
  }
  Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found);
  Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,
      YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),
      containerLaunchContext.getEnvironment().get(Environment.HOME.name()));
  String pid = reader.readLine().trim();
  // The pid line must be the last line in the file.
  Assert.assertEquals(null, reader.readLine());
  Assert.assertTrue("Process is not alive!",
      DefaultContainerExecutor.containerIsAlive(pid));
  // Once more (deliberate repeat: the process must still be alive).
  Assert.assertTrue("Process is not alive!",
      DefaultContainerExecutor.containerIsAlive(pid));
  // Stop the container and verify it reports KILLED_BY_APPMASTER and that
  // the underlying process is really gone.
  List<ContainerId> containerIds = new ArrayList<ContainerId>();
  containerIds.add(cId);
  StopContainersRequest stopRequest = StopContainersRequest.newInstance(containerIds);
  containerManager.stopContainers(stopRequest);
  BaseContainerManagerTest.waitForContainerState(containerManager, cId,
      ContainerState.COMPLETE);
  GetContainerStatusesRequest gcsRequest =
      GetContainerStatusesRequest.newInstance(containerIds);
  ContainerStatus containerStatus =
      containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
  int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
  Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
  Assert.assertFalse("Process is still alive!",
      DefaultContainerExecutor.containerIsAlive(pid));
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * A directory that became full must accept new files again once some of
 * its files have been released (file counts decremented).
 */
@Test(timeout=1000)
public void testDirectoryStateChangeFromFullToNonFull() {
  YarnConfiguration config = new YarnConfiguration();
  // With 40 configured, the loop below assumes the root effectively holds
  // 4 files before filling up (presumably 40 minus slots reserved for
  // sub-directories -- confirm in LocalCacheDirectoryManager).
  config.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "40");
  LocalCacheDirectoryManager manager = new LocalCacheDirectoryManager(config);
  final String root = "";
  final String firstChild = "0";
  // The first four localizations all land in the root directory.
  int allocated = 0;
  while (allocated < 4) {
    Assert.assertEquals(root, manager.getRelativePathForLocalization());
    allocated++;
  }
  // Freeing two files re-opens the root for exactly two more localizations...
  manager.decrementFileCountForPath(root);
  manager.decrementFileCountForPath(root);
  Assert.assertEquals(root, manager.getRelativePathForLocalization());
  Assert.assertEquals(root, manager.getRelativePathForLocalization());
  // ...after which allocation moves on to the first sub-directory.
  Assert.assertEquals(firstChild, manager.getRelativePathForLocalization());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Drains 37*36*36 path allocations and checks that each returned relative
 * path matches the expected base-36 hierarchical layout, then verifies
 * that directories with decremented counts are reused in order.
 */
@Test(timeout=10000)
public void testHierarchicalSubDirectoryCreation() {
  YarnConfiguration config = new YarnConfiguration();
  config.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37");
  LocalCacheDirectoryManager manager = new LocalCacheDirectoryManager(config);
  // The very first allocation goes to the root ("" relative path).
  Assert.assertTrue(manager.getRelativePathForLocalization().isEmpty());
  final int totalAllocations = 37 * 36 * 36;
  for (int i = 1; i <= totalAllocations; i++) {
    // Derive the expected relative path from the base-36 rendering of i-1.
    String base36 = Integer.toString(i - 1, 36);
    StringBuilder expected = new StringBuilder();
    if (base36.length() == 1) {
      expected.append(base36.charAt(0));
    } else {
      // The leading digit is shifted down by one in the expected layout.
      expected.append(
          Integer.toString(Integer.parseInt(base36.substring(0, 1), 36) - 1, 36));
    }
    for (int j = 1; j < base36.length(); j++) {
      expected.append(Path.SEPARATOR).append(base36.charAt(j));
    }
    Assert.assertEquals(expected.toString(),
        manager.getRelativePathForLocalization());
  }
  // Directories whose counts drop become the next targets, in decrement order.
  String freedFirst = "4";
  String freedSecond = "2";
  manager.decrementFileCountForPath(freedFirst);
  manager.decrementFileCountForPath(freedSecond);
  Assert.assertEquals(freedFirst, manager.getRelativePathForLocalization());
  Assert.assertEquals(freedSecond, manager.getRelativePathForLocalization());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies file-count bookkeeping when counts are adjusted explicitly via
// incrementFileCountForPath(), including paths deep in the hierarchy.
@Test public void testIncrementFileCountForPath(){
YarnConfiguration conf=new YarnConfiguration();
// Limit is DIRECTORIES_PER_LEVEL + 2; presumably this leaves room for two
// files per directory after reserving sub-directory slots -- confirm in
// LocalCacheDirectoryManager.
conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2);
LocalCacheDirectoryManager mgr=new LocalCacheDirectoryManager(conf);
final String rootPath="";
// Account one pre-existing file against the root directory.
mgr.incrementFileCountForPath(rootPath);
// Root still has one free slot...
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
// ...but is now full, so the next localization goes elsewhere.
Assert.assertFalse("root dir should be full",rootPath.equals(mgr.getRelativePathForLocalization()));
// Use up the remaining slot of that other sub-directory (result discarded).
mgr.getRelativePathForLocalization();
// Free two slots in the root directory again.
mgr.decrementFileCountForPath(rootPath);
mgr.decrementFileCountForPath(rootPath);
// The two freed root slots are handed out first...
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization());
// ...then the root is full again; remember where the next one lands.
String otherDir=mgr.getRelativePathForLocalization();
Assert.assertFalse("root dir should be full",otherDir.equals(rootPath));
// Now exercise counts on directories deep in the hierarchy.
final String deepDir0="d/e/e/p/0";
final String deepDir1="d/e/e/p/1";
final String deepDir2="d/e/e/p/2";
final String deepDir3="d/e/e/p/3";
mgr.incrementFileCountForPath(deepDir0);
// otherDir still has a free slot, so it is consumed first...
Assert.assertEquals(otherDir,mgr.getRelativePathForLocalization());
// ...then allocation continues into the deep hierarchy.
Assert.assertEquals(deepDir0,mgr.getRelativePathForLocalization());
Assert.assertEquals("total dir count incorrect after increment",deepDir1,mgr.getRelativePathForLocalization());
// Incrementing deeper siblings shifts the next free directory to deepDir3.
mgr.incrementFileCountForPath(deepDir2);
mgr.incrementFileCountForPath(deepDir1);
mgr.incrementFileCountForPath(deepDir2);
Assert.assertEquals(deepDir3,mgr.getRelativePathForLocalization());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * LocalResourceRequest comparison must distinguish resources that differ in
 * path, timestamp, type, or pattern; lexically smaller paths and older
 * timestamps sort first.
 */
@Test
public void testResourceOrder() throws URISyntaxException {
  Random rand = new Random();
  long seed = rand.nextLong();
  rand.setSeed(seed);
  // Log the seed so a failing ordering can be reproduced.
  System.out.println("SEED: " + seed);
  long basetime = rand.nextLong() >>> 2;
  org.apache.hadoop.yarn.api.records.LocalResource yarnRsrcA = getYarnResource(
      new Path("http://yak.org:80/foobar"), -1, basetime, FILE, PUBLIC, "^/foo/.*");
  final LocalResourceRequest reqA = new LocalResourceRequest(yarnRsrcA);
  // Same timestamp/type/pattern, lexically larger path: A sorts first.
  org.apache.hadoop.yarn.api.records.LocalResource yarnRsrcB = getYarnResource(
      new Path("http://yak.org:80/foobaz"), -1, basetime, FILE, PUBLIC, "^/foo/.*");
  LocalResourceRequest reqB = new LocalResourceRequest(yarnRsrcB);
  assertTrue(0 > reqA.compareTo(reqB));
  // Identical except for a later timestamp: the older one sorts first.
  yarnRsrcB = getYarnResource(
      new Path("http://yak.org:80/foobar"), -1, basetime + 1, FILE, PUBLIC, "^/foo/.*");
  reqB = new LocalResourceRequest(yarnRsrcB);
  assertTrue(0 > reqA.compareTo(reqB));
  // A differing resource type must not compare equal.
  yarnRsrcB = getYarnResource(
      new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, "^/foo/.*");
  reqB = new LocalResourceRequest(yarnRsrcB);
  assertTrue(0 != reqA.compareTo(reqB));
  // A differing pattern must not compare equal.
  yarnRsrcB = getYarnResource(
      new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, "^/food/.*");
  reqB = new LocalResourceRequest(yarnRsrcB);
  assertTrue(0 != reqA.compareTo(reqB));
  // A null pattern vs. a non-null pattern must not compare equal.
  yarnRsrcB = getYarnResource(
      new Path("http://yak.org:80/foobar"), -1, basetime, ARCHIVE, PUBLIC, null);
  reqB = new LocalResourceRequest(yarnRsrcB);
  assertTrue(0 != reqA.compareTo(reqB));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Successful localization must be recorded in the NM state store: a start
 * record when a localization path is handed out, a finish record when the
 * resource localizes, and removal of the entry when the resource is
 * released and removed.
 */
@Test
@SuppressWarnings("unchecked")
public void testStateStoreSuccessfulLocalization() throws Exception {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final Path localDir = new Path("/tmp");
  Configuration conf = new YarnConfiguration();
  DrainDispatcher dispatcher = null;
  dispatcher = createDispatcher(conf);
  // FIX: restored generic type parameters that had been stripped (raw
  // ArgumentCaptor.getValue() would not compile).
  EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
  EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class, localizerEventHandler);
  dispatcher.register(ContainerEventType.class, containerEventHandler);
  DeletionService mockDelService = mock(DeletionService.class);
  NMStateStoreService stateStore = mock(NMStateStoreService.class);
  try {
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId,
        dispatcher, false, conf, stateStore);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.APPLICATION);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent1 = new ResourceRequestEvent(lr1,
        LocalResourceVisibility.APPLICATION, lc1);
    tracker.handle(reqEvent1);
    dispatcher.await();
    // Handing out a localization path should persist the start record.
    Path hierarchicalPath1 = tracker.getPathForLocalization(lr1, localDir);
    ArgumentCaptor<LocalResourceProto> localResourceCaptor =
        ArgumentCaptor.forClass(LocalResourceProto.class);
    ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
    verify(stateStore).startResourceLocalization(eq(user), eq(appId),
        localResourceCaptor.capture(), pathCaptor.capture());
    LocalResourceProto lrProto = localResourceCaptor.getValue();
    Path localizedPath1 = pathCaptor.getValue();
    Assert.assertEquals(lr1,
        new LocalResourceRequest(new LocalResourcePBImpl(lrProto)));
    Assert.assertEquals(hierarchicalPath1, localizedPath1.getParent());
    // Completing localization should persist the finish record.
    ResourceLocalizedEvent rle1 =
        new ResourceLocalizedEvent(lr1, pathCaptor.getValue(), 120);
    tracker.handle(rle1);
    dispatcher.await();
    ArgumentCaptor<LocalizedResourceProto> localizedProtoCaptor =
        ArgumentCaptor.forClass(LocalizedResourceProto.class);
    verify(stateStore).finishResourceLocalization(eq(user), eq(appId),
        localizedProtoCaptor.capture());
    LocalizedResourceProto localizedProto = localizedProtoCaptor.getValue();
    Assert.assertEquals(lr1, new LocalResourceRequest(
        new LocalResourcePBImpl(localizedProto.getResource())));
    Assert.assertEquals(localizedPath1.toString(), localizedProto.getLocalPath());
    LocalizedResource localizedRsrc1 = tracker.getLocalizedResource(lr1);
    Assert.assertNotNull(localizedRsrc1);
    // Releasing and removing the resource should delete the stored entry.
    tracker.handle(new ResourceReleaseEvent(lr1, cId1));
    dispatcher.await();
    boolean removeResult = tracker.remove(localizedRsrc1, mockDelService);
    Assert.assertTrue(removeResult);
    verify(stateStore).removeLocalizedResource(eq(user), eq(appId),
        eq(localizedPath1));
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Basic reference counting: requests from two containers bump a resource's
 * ref count, releases drop it, and remove() only succeeds once the ref
 * count reaches zero and the resource is LOCALIZED.
 */
@Test(timeout=10000)
@SuppressWarnings("unchecked")
public void test() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    // FIX: restored generic type parameters that had been stripped (raw types).
    EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
    EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    DeletionService mockDelService = mock(DeletionService.class);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2);
    LocalizerContext lc2 = new LocalizerContext(user, cId2, null);
    LocalResourceRequest req1 =
        createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
    LocalResourceRequest req2 =
        createLocalResourceRequest(user, 2, 1, LocalResourceVisibility.PUBLIC);
    LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
    LocalizedResource lr2 = createLocalizedResource(req2, dispatcher);
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    localrsrc.put(req1, lr1);
    localrsrc.put(req2, lr2);
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, false, conf, new NMNullStateStoreService());
    ResourceEvent req11Event =
        new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1);
    ResourceEvent req12Event =
        new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc2);
    ResourceEvent req21Event =
        new ResourceRequestEvent(req2, LocalResourceVisibility.PUBLIC, lc1);
    ResourceEvent rel11Event = new ResourceReleaseEvent(req1, cId1);
    ResourceEvent rel12Event = new ResourceReleaseEvent(req1, cId2);
    ResourceEvent rel21Event = new ResourceReleaseEvent(req2, cId1);
    // Two containers request req1, one container requests req2.
    tracker.handle(req11Event);
    tracker.handle(req12Event);
    tracker.handle(req21Event);
    dispatcher.await();
    verify(localizerEventHandler, times(3))
        .handle(any(LocalizerResourceRequestEvent.class));
    Assert.assertEquals(2, lr1.getRefCount());
    Assert.assertEquals(1, lr2.getRefCount());
    // Release req2; both resources remain tracked.
    tracker.handle(rel21Event);
    dispatcher.await();
    verifyTrackedResourceCount(tracker, 2);
    // req1 is still referenced, so remove() must refuse.
    Assert.assertEquals(2, lr1.getRefCount());
    Assert.assertFalse(tracker.remove(lr1, mockDelService));
    verifyTrackedResourceCount(tracker, 2);
    ResourceLocalizedEvent rle =
        new ResourceLocalizedEvent(req1, new Path("file:///tmp/r1"), 1);
    lr1.handle(rle);
    Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
    // Drop both references; removal now succeeds.
    tracker.handle(rel11Event);
    tracker.handle(rel12Event);
    Assert.assertEquals(0, lr1.getRefCount());
    Assert.assertTrue(tracker.remove(lr1, mockDelService));
    verifyTrackedResourceCount(tracker, 1);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Recovered resources must be accounted in the per-root-directory
 * LocalCacheDirectoryManager so that sub-directory file counts reflect
 * previously localized paths after NM restart.
 */
@Test
@SuppressWarnings("unchecked")
public void testRecoveredResourceWithDirCacheMgr() throws Exception {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final Path localDirRoot = new Path("/tmp/localdir");
  Configuration conf = new YarnConfiguration();
  DrainDispatcher dispatcher = null;
  dispatcher = createDispatcher(conf);
  // FIX: restored generic type parameters that had been stripped (raw types).
  EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
  EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class, localizerEventHandler);
  dispatcher.register(ContainerEventType.class, containerEventHandler);
  NMStateStoreService stateStore = mock(NMStateStoreService.class);
  try {
    LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user,
        appId, dispatcher, true, conf, stateStore);
    // Recover a resource localized under <root>/4/2.
    LocalResourceRequest lr1 =
        createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr1));
    final long localizedId1 = 52;
    Path hierarchicalPath1 =
        new Path(localDirRoot + "/4/2", Long.toString(localizedId1));
    Path localizedPath1 = new Path(hierarchicalPath1, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr1, localizedPath1, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr1));
    LocalCacheDirectoryManager dirMgrRoot =
        tracker.getDirectoryManager(localDirRoot);
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4/2").getCount());
    // A second resource in the same sub-directory bumps its count.
    LocalResourceRequest lr2 =
        createLocalResourceRequest(user, 2, 2, LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr2));
    final long localizedId2 = localizedId1 + 1;
    Path hierarchicalPath2 =
        new Path(localDirRoot + "/4/2", Long.toString(localizedId2));
    Path localizedPath2 = new Path(hierarchicalPath2, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr2, localizedPath2, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr2));
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount());
    // A resource in a sibling sub-directory is counted separately.
    LocalResourceRequest lr3 =
        createLocalResourceRequest(user, 3, 3, LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr3));
    final long localizedId3 = 128;
    Path hierarchicalPath3 =
        new Path(localDirRoot + "/4/3", Long.toString(localizedId3));
    Path localizedPath3 = new Path(hierarchicalPath3, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr3, localizedPath3, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr3));
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4/3").getCount());
    // A resource one level up ("4") is counted on that directory only.
    LocalResourceRequest lr4 =
        createLocalResourceRequest(user, 4, 4, LocalResourceVisibility.PUBLIC);
    Assert.assertNull(tracker.getLocalizedResource(lr4));
    final long localizedId4 = 256;
    Path hierarchicalPath4 =
        new Path(localDirRoot + "/4", Long.toString(localizedId4));
    Path localizedPath4 = new Path(hierarchicalPath4, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr4, localizedPath4, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr4));
    Assert.assertEquals(0, dirMgrRoot.getDirectory("").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4").getCount());
    Assert.assertEquals(2, dirMgrRoot.getDirectory("4/2").getCount());
    Assert.assertEquals(1, dirMgrRoot.getDirectory("4/3").getCount());
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * After recovering a resource whose localized path used numeric id 52, the
 * next path handed out for a new localization must continue the sequence
 * (53) rather than restarting and colliding with recovered paths.
 */
@Test
@SuppressWarnings("unchecked")
public void testRecoveredResource() throws Exception {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final Path localDir = new Path("/tmp/localdir");
  Configuration conf = new YarnConfiguration();
  DrainDispatcher dispatcher = null;
  dispatcher = createDispatcher(conf);
  // FIX: restored generic type parameters that had been stripped (raw types).
  EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
  EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class, localizerEventHandler);
  dispatcher.register(ContainerEventType.class, containerEventHandler);
  NMStateStoreService stateStore = mock(NMStateStoreService.class);
  try {
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId,
        dispatcher, false, conf, stateStore);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.APPLICATION);
    Assert.assertNull(tracker.getLocalizedResource(lr1));
    // Recover a resource whose directory name encodes id 52.
    final long localizedId1 = 52;
    Path hierarchicalPath1 = new Path(localDir, Long.toString(localizedId1));
    Path localizedPath1 = new Path(hierarchicalPath1, "resource.jar");
    tracker.handle(new ResourceRecoveredEvent(lr1, localizedPath1, 120));
    dispatcher.await();
    Assert.assertNotNull(tracker.getLocalizedResource(lr1));
    // A fresh request must be assigned the next id after the recovered one.
    LocalResourceRequest lr2 = createLocalResourceRequest(user, 2, 2,
        LocalResourceVisibility.APPLICATION);
    LocalizerContext lc2 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent2 = new ResourceRequestEvent(lr2,
        LocalResourceVisibility.APPLICATION, lc2);
    tracker.handle(reqEvent2);
    dispatcher.await();
    Path hierarchicalPath2 = tracker.getPathForLocalization(lr2, localDir);
    long localizedId2 = Long.parseLong(hierarchicalPath2.getName());
    Assert.assertEquals(localizedId1 + 1, localizedId2);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A failed localization must remove the in-progress record from the NM
 * state store that was created when the localization path was handed out.
 */
@Test
@SuppressWarnings("unchecked")
public void testStateStoreFailedLocalization() throws Exception {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(1, 1);
  final Path localDir = new Path("/tmp");
  Configuration conf = new YarnConfiguration();
  DrainDispatcher dispatcher = null;
  dispatcher = createDispatcher(conf);
  // FIX: restored generic type parameters that had been stripped (raw
  // ArgumentCaptor.getValue() would not compile).
  EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
  EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
  dispatcher.register(LocalizerEventType.class, localizerEventHandler);
  dispatcher.register(ContainerEventType.class, containerEventHandler);
  NMStateStoreService stateStore = mock(NMStateStoreService.class);
  try {
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, appId,
        dispatcher, false, conf, stateStore);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalResourceRequest lr1 = createLocalResourceRequest(user, 1, 1,
        LocalResourceVisibility.APPLICATION);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent1 = new ResourceRequestEvent(lr1,
        LocalResourceVisibility.APPLICATION, lc1);
    tracker.handle(reqEvent1);
    dispatcher.await();
    // Handing out a localization path persists the start record.
    Path hierarchicalPath1 = tracker.getPathForLocalization(lr1, localDir);
    ArgumentCaptor<LocalResourceProto> localResourceCaptor =
        ArgumentCaptor.forClass(LocalResourceProto.class);
    ArgumentCaptor<Path> pathCaptor = ArgumentCaptor.forClass(Path.class);
    verify(stateStore).startResourceLocalization(eq(user), eq(appId),
        localResourceCaptor.capture(), pathCaptor.capture());
    LocalResourceProto lrProto = localResourceCaptor.getValue();
    Path localizedPath1 = pathCaptor.getValue();
    Assert.assertEquals(lr1,
        new LocalResourceRequest(new LocalResourcePBImpl(lrProto)));
    Assert.assertEquals(hierarchicalPath1, localizedPath1.getParent());
    // A localization failure must clean the stored entry back out.
    ResourceFailedLocalizationEvent rfe1 = new ResourceFailedLocalizationEvent(
        lr1, new Exception("Test").toString());
    tracker.handle(rfe1);
    dispatcher.await();
    verify(stateStore).removeLocalizedResource(eq(user), eq(appId),
        eq(localizedPath1));
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * Localizations must be spread across hierarchical cache directories,
 * failed localizations must not leak tracked resources, and unreferenced
 * resources must be removable via iteration.
 */
@Test(timeout=100000)
@SuppressWarnings("unchecked")
public void testHierarchicalLocalCacheDirectories() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37");
    dispatcher = createDispatcher(conf);
    // FIX: restored generic type parameters that had been stripped (raw
    // Iterator.next() below would not compile).
    EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
    EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    DeletionService mockDelService = mock(DeletionService.class);
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, true, conf, new NMNullStateStoreService());
    Path localDir = new Path("/tmp");
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    // First resource localizes successfully.
    LocalResourceRequest lr1 =
        createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    ResourceEvent reqEvent1 =
        new ResourceRequestEvent(lr1, LocalResourceVisibility.PUBLIC, lc1);
    tracker.handle(reqEvent1);
    Path hierarchicalPath1 =
        tracker.getPathForLocalization(lr1, localDir).getParent();
    ResourceLocalizedEvent rle1 = new ResourceLocalizedEvent(lr1,
        new Path(hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "file1"),
        120);
    tracker.handle(rle1);
    // Second resource fails to localize.
    LocalResourceRequest lr2 =
        createLocalResourceRequest(user, 3, 3, LocalResourceVisibility.PUBLIC);
    ResourceEvent reqEvent2 =
        new ResourceRequestEvent(lr2, LocalResourceVisibility.PUBLIC, lc1);
    tracker.handle(reqEvent2);
    Path hierarchicalPath2 =
        tracker.getPathForLocalization(lr2, localDir).getParent();
    ResourceFailedLocalizationEvent rfe2 = new ResourceFailedLocalizationEvent(
        lr2, new Exception("Test").toString());
    tracker.handle(rfe2);
    Assert.assertNotSame(hierarchicalPath1, hierarchicalPath2);
    // Third resource localizes into the first hierarchical sub-directory.
    LocalResourceRequest lr3 =
        createLocalResourceRequest(user, 2, 2, LocalResourceVisibility.PUBLIC);
    ResourceEvent reqEvent3 =
        new ResourceRequestEvent(lr3, LocalResourceVisibility.PUBLIC, lc1);
    tracker.handle(reqEvent3);
    Path hierarchicalPath3 =
        tracker.getPathForLocalization(lr3, localDir).getParent();
    ResourceLocalizedEvent rle3 = new ResourceLocalizedEvent(lr3,
        new Path(hierarchicalPath3.toUri().toString() + Path.SEPARATOR + "file3"),
        120);
    tracker.handle(rle3);
    Assert.assertEquals(hierarchicalPath3.toUri().toString(),
        hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "0");
    ResourceEvent relEvent1 = new ResourceReleaseEvent(lr1, cId1);
    tracker.handle(relEvent1);
    // The failed resource is gone: exactly two resources remain tracked.
    int resources = 0;
    Iterator<LocalizedResource> iter = tracker.iterator();
    while (iter.hasNext()) {
      iter.next();
      resources++;
    }
    Assert.assertEquals(2, resources);
    // Unreferenced resources can be removed, leaving one tracked resource.
    iter = tracker.iterator();
    while (iter.hasNext()) {
      LocalizedResource rsrc = iter.next();
      if (rsrc.getRefCount() == 0) {
        Assert.assertTrue(tracker.remove(rsrc, mockDelService));
        resources--;
      }
    }
    Assert.assertEquals(1, resources);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * If a localized file disappears from disk and the resource is requested
 * again, the tracker must hand out a fresh LocalizedResource instance
 * rather than the stale one that pointed at the deleted file.
 */
@Test(timeout=10000)
@SuppressWarnings("unchecked")
public void testConsistency() {
  String user = "testuser";
  DrainDispatcher dispatcher = null;
  try {
    Configuration conf = new Configuration();
    dispatcher = createDispatcher(conf);
    // FIX: restored generic type parameters that had been stripped (raw types).
    EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
    EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
    dispatcher.register(LocalizerEventType.class, localizerEventHandler);
    dispatcher.register(ContainerEventType.class, containerEventHandler);
    ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
    LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
    LocalResourceRequest req1 =
        createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
    LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
    ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc =
        new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
    localrsrc.put(req1, lr1);
    LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null,
        dispatcher, localrsrc, false, conf, new NMNullStateStoreService());
    ResourceEvent req11Event =
        new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1);
    ResourceEvent rel11Event = new ResourceReleaseEvent(req1, cId1);
    tracker.handle(req11Event);
    dispatcher.await();
    Assert.assertEquals(1, lr1.getRefCount());
    dispatcher.await();
    verifyTrackedResourceCount(tracker, 1);
    // Mark the resource LOCALIZED and back it with a real file on disk.
    ResourceLocalizedEvent rle =
        new ResourceLocalizedEvent(req1, new Path("file:///tmp/r1"), 1);
    lr1.handle(rle);
    Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
    Assert.assertTrue(createdummylocalizefile(new Path("file:///tmp/r1")));
    LocalizedResource rsrcbefore = tracker.iterator().next();
    File resFile = new File(lr1.getLocalPath().toUri().getRawPath().toString());
    Assert.assertTrue(resFile.exists());
    // Simulate the localized file vanishing from disk.
    Assert.assertTrue(resFile.delete());
    // Re-request: the tracker must replace the stale resource instance.
    tracker.handle(req11Event);
    dispatcher.await();
    lr1.handle(rle);
    Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED));
    LocalizedResource rsrcafter = tracker.iterator().next();
    if (rsrcbefore == rsrcafter) {
      Assert.fail("Localized resource should not be equal");
    }
    tracker.handle(rel11Event);
  } finally {
    if (dispatcher != null) {
      dispatcher.stop();
    }
  }
}
InternalCallVerifier EqualityVerifier
// Exercises the LocalizedResource state machine end to end: each container request
// while DOWNLOADING fires a REQUEST_RESOURCE_LOCALIZATION localizer event, releases
// before localization produce no container events, localization fans out
// RESOURCE_LOCALIZED to every waiting container, and a request arriving after the
// resource is LOCALIZED is answered immediately.
@Test @SuppressWarnings("unchecked") public void testNotification() throws Exception {
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(new Configuration());
try {
dispatcher.start();
// Mock sinks for the two event buses so deliveries can be verified.
EventHandler containerBus=mock(EventHandler.class);
EventHandler localizerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
dispatcher.register(LocalizerEventType.class,localizerBus);
LocalResource apiRsrc=createMockResource();
// Container 0 requests the resource: expect a localization request event
// and a transition into DOWNLOADING.
final ContainerId container0=getMockContainer(0);
final Credentials creds0=new Credentials();
final LocalResourceVisibility vis0=LocalResourceVisibility.PRIVATE;
final LocalizerContext ctxt0=new LocalizerContext("yak",container0,creds0);
LocalResourceRequest rsrcA=new LocalResourceRequest(apiRsrc);
LocalizedResource local=new LocalizedResource(rsrcA,dispatcher);
local.handle(new ResourceRequestEvent(rsrcA,vis0,ctxt0));
dispatcher.await();
LocalizerEventMatcher matchesL0Req=new LocalizerEventMatcher(container0,creds0,vis0,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
verify(localizerBus).handle(argThat(matchesL0Req));
assertEquals(ResourceState.DOWNLOADING,local.getState());
// A second container (different visibility) requesting the same resource
// also produces its own localizer request event.
final Credentials creds1=new Credentials();
final ContainerId container1=getMockContainer(1);
final LocalizerContext ctxt1=new LocalizerContext("yak",container1,creds1);
final LocalResourceVisibility vis1=LocalResourceVisibility.PUBLIC;
local.handle(new ResourceRequestEvent(rsrcA,vis1,ctxt1));
dispatcher.await();
LocalizerEventMatcher matchesL1Req=new LocalizerEventMatcher(container1,creds1,vis1,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
verify(localizerBus).handle(argThat(matchesL1Req));
// Releasing while still DOWNLOADING must not notify any container and the
// resource stays DOWNLOADING (even when the ref count drops to zero).
local.handle(new ResourceReleaseEvent(rsrcA,container0));
dispatcher.await();
verify(containerBus,never()).handle(isA(ContainerEvent.class));
assertEquals(ResourceState.DOWNLOADING,local.getState());
local.handle(new ResourceReleaseEvent(rsrcA,container1));
dispatcher.await();
verify(containerBus,never()).handle(isA(ContainerEvent.class));
assertEquals(ResourceState.DOWNLOADING,local.getState());
// Two new containers queue up on the in-flight download; each still gets its
// own localizer request event.
final ContainerId container2=getMockContainer(2);
final LocalResourceVisibility vis2=LocalResourceVisibility.PRIVATE;
final Credentials creds2=new Credentials();
final LocalizerContext ctxt2=new LocalizerContext("yak",container2,creds2);
final ContainerId container3=getMockContainer(3);
final LocalResourceVisibility vis3=LocalResourceVisibility.PRIVATE;
final Credentials creds3=new Credentials();
final LocalizerContext ctxt3=new LocalizerContext("yak",container3,creds3);
local.handle(new ResourceRequestEvent(rsrcA,vis2,ctxt2));
local.handle(new ResourceRequestEvent(rsrcA,vis3,ctxt3));
dispatcher.await();
LocalizerEventMatcher matchesL2Req=new LocalizerEventMatcher(container2,creds2,vis2,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
verify(localizerBus).handle(argThat(matchesL2Req));
LocalizerEventMatcher matchesL3Req=new LocalizerEventMatcher(container3,creds3,vis3,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION);
verify(localizerBus).handle(argThat(matchesL3Req));
// Completion fans RESOURCE_LOCALIZED out to both waiting containers and moves
// the resource to LOCALIZED.
Path locA=new Path("file:///cache/rsrcA");
local.handle(new ResourceLocalizedEvent(rsrcA,locA,10));
dispatcher.await();
ContainerEventMatcher matchesC2Localized=new ContainerEventMatcher(container2,ContainerEventType.RESOURCE_LOCALIZED);
ContainerEventMatcher matchesC3Localized=new ContainerEventMatcher(container3,ContainerEventType.RESOURCE_LOCALIZED);
verify(containerBus).handle(argThat(matchesC2Localized));
verify(containerBus).handle(argThat(matchesC3Localized));
assertEquals(ResourceState.LOCALIZED,local.getState());
// A request arriving after localization is satisfied immediately with a
// RESOURCE_LOCALIZED container event; state remains LOCALIZED.
final ContainerId container4=getMockContainer(4);
final Credentials creds4=new Credentials();
final LocalizerContext ctxt4=new LocalizerContext("yak",container4,creds4);
final LocalResourceVisibility vis4=LocalResourceVisibility.PRIVATE;
local.handle(new ResourceRequestEvent(rsrcA,vis4,ctxt4));
dispatcher.await();
ContainerEventMatcher matchesC4Localized=new ContainerEventMatcher(container4,ContainerEventType.RESOURCE_LOCALIZED);
verify(containerBus).handle(argThat(matchesC4Localized));
assertEquals(ResourceState.LOCALIZED,local.getState());
}
finally {
dispatcher.stop();
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies NM resource-localization recovery from the state store: resources are
// localized across private/application/public trackers for two users and two apps,
// then a fresh service instance recovers state via recoverLocalizedResources().
// Fully LOCALIZED resources must be recovered with identical request, path and
// size; a resource still DOWNLOADING at "restart" (appLr2) must not be recovered.
@Test @SuppressWarnings("unchecked") public void testRecovery() throws Exception {
final String user1="user1";
final String user2="user2";
final ApplicationId appId1=ApplicationId.newInstance(1,1);
final ApplicationId appId2=ApplicationId.newInstance(1,2);
List localDirs=new ArrayList();
String[] sDirs=new String[4];
for (int i=0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
// Recovery only happens when NM recovery is enabled; the in-memory state
// store stands in for the real leveldb store.
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
EventHandler localizerBus=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerBus);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
ResourceLocalizationService spyService=createSpyService(dispatcher,dirsHandler,stateStore);
try {
spyService.init(conf);
spyService.start();
// Initialize application resources for both mock apps so the per-app and
// per-user trackers exist.
final Application app1=mock(Application.class);
when(app1.getUser()).thenReturn(user1);
when(app1.getAppId()).thenReturn(appId1);
final Application app2=mock(Application.class);
when(app2.getUser()).thenReturn(user2);
when(app2.getAppId()).thenReturn(appId2);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app1));
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app2));
dispatcher.await();
LocalResourcesTracker appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1);
LocalResourcesTracker privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null);
LocalResourcesTracker appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2);
LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null);
final Container c1=getMockContainer(appId1,1,user1);
final Container c2=getMockContainer(appId2,2,user2);
// Seed is printed so a failing run can be reproduced deterministically.
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
// Build a mix of private/public/application resources for the two containers.
final LocalResource privResource1=getPrivateMockedResource(r);
final LocalResourceRequest privReq1=new LocalResourceRequest(privResource1);
final LocalResource privResource2=getPrivateMockedResource(r);
final LocalResourceRequest privReq2=new LocalResourceRequest(privResource2);
final LocalResource pubResource1=getPublicMockedResource(r);
final LocalResourceRequest pubReq1=new LocalResourceRequest(pubResource1);
final LocalResource pubResource2=getPublicMockedResource(r);
final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2);
final LocalResource appResource1=getAppMockedResource(r);
final LocalResourceRequest appReq1=new LocalResourceRequest(appResource1);
final LocalResource appResource2=getAppMockedResource(r);
final LocalResourceRequest appReq2=new LocalResourceRequest(appResource2);
final LocalResource appResource3=getAppMockedResource(r);
final LocalResourceRequest appReq3=new LocalResourceRequest(appResource3);
Map> req1=new HashMap>();
req1.put(LocalResourceVisibility.PRIVATE,Arrays.asList(new LocalResourceRequest[]{privReq1,privReq2}));
req1.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq1));
req1.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq1));
Map> req2=new HashMap>();
req2.put(LocalResourceVisibility.APPLICATION,Arrays.asList(new LocalResourceRequest[]{appReq2,appReq3}));
req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2));
spyService.handle(new ContainerLocalizationRequestEvent(c1,req1));
spyService.handle(new ContainerLocalizationRequestEvent(c2,req2));
dispatcher.await();
// Start localization (assign local paths) for every resource except appReq2,
// which is intentionally left without a completion event below.
privTracker1.getPathForLocalization(privReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1));
privTracker1.getPathForLocalization(privReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1));
LocalizedResource privLr1=privTracker1.getLocalizedResource(privReq1);
LocalizedResource privLr2=privTracker1.getLocalizedResource(privReq2);
appTracker1.getPathForLocalization(appReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId1));
LocalizedResource appLr1=appTracker1.getLocalizedResource(appReq1);
appTracker2.getPathForLocalization(appReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2));
LocalizedResource appLr2=appTracker2.getLocalizedResource(appReq2);
appTracker2.getPathForLocalization(appReq3,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2));
LocalizedResource appLr3=appTracker2.getLocalizedResource(appReq3);
pubTracker.getPathForLocalization(pubReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE));
LocalizedResource pubLr1=pubTracker.getLocalizedResource(pubReq1);
pubTracker.getPathForLocalization(pubReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE));
LocalizedResource pubLr2=pubTracker.getLocalizedResource(pubReq2);
// Complete localization for everything except appReq2 (sizes are tweaked so
// the recovered size check is meaningful).
assertNotNull("Localization not started",privLr1.getLocalPath());
privTracker1.handle(new ResourceLocalizedEvent(privReq1,privLr1.getLocalPath(),privLr1.getSize() + 5));
assertNotNull("Localization not started",privLr2.getLocalPath());
privTracker1.handle(new ResourceLocalizedEvent(privReq2,privLr2.getLocalPath(),privLr2.getSize() + 10));
assertNotNull("Localization not started",appLr1.getLocalPath());
appTracker1.handle(new ResourceLocalizedEvent(appReq1,appLr1.getLocalPath(),appLr1.getSize()));
assertNotNull("Localization not started",appLr3.getLocalPath());
appTracker2.handle(new ResourceLocalizedEvent(appReq3,appLr3.getLocalPath(),appLr3.getSize() + 7));
assertNotNull("Localization not started",pubLr1.getLocalPath());
pubTracker.handle(new ResourceLocalizedEvent(pubReq1,pubLr1.getLocalPath(),pubLr1.getSize() + 1000));
assertNotNull("Localization not started",pubLr2.getLocalPath());
pubTracker.handle(new ResourceLocalizedEvent(pubReq2,pubLr2.getLocalPath(),pubLr2.getSize() + 99999));
dispatcher.await();
assertEquals(ResourceState.LOCALIZED,privLr1.getState());
assertEquals(ResourceState.LOCALIZED,privLr2.getState());
assertEquals(ResourceState.LOCALIZED,appLr1.getState());
assertEquals(ResourceState.DOWNLOADING,appLr2.getState());
assertEquals(ResourceState.LOCALIZED,appLr3.getState());
assertEquals(ResourceState.LOCALIZED,pubLr1.getState());
assertEquals(ResourceState.LOCALIZED,pubLr2.getState());
// Simulate NM restart: build a new service over the same state store and
// recover the persisted localization state.
spyService=createSpyService(dispatcher,dirsHandler,stateStore);
spyService.init(conf);
spyService.recoverLocalizedResources(stateStore.loadLocalizationState());
dispatcher.await();
appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1);
privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null);
appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2);
pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null);
// Completed resources must come back with the same request/path/size/state.
LocalizedResource recoveredRsrc=privTracker1.getLocalizedResource(privReq1);
assertEquals(privReq1,recoveredRsrc.getRequest());
assertEquals(privLr1.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(privLr1.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
recoveredRsrc=privTracker1.getLocalizedResource(privReq2);
assertEquals(privReq2,recoveredRsrc.getRequest());
assertEquals(privLr2.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(privLr2.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
recoveredRsrc=appTracker1.getLocalizedResource(appReq1);
assertEquals(appReq1,recoveredRsrc.getRequest());
assertEquals(appLr1.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(appLr1.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
// appReq2 never finished downloading, so it must not survive recovery.
recoveredRsrc=appTracker2.getLocalizedResource(appReq2);
assertNull("in-progress resource should not be present",recoveredRsrc);
recoveredRsrc=appTracker2.getLocalizedResource(appReq3);
assertEquals(appReq3,recoveredRsrc.getRequest());
assertEquals(appLr3.getLocalPath(),recoveredRsrc.getLocalPath());
assertEquals(appLr3.getSize(),recoveredRsrc.getSize());
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState());
}
finally {
dispatcher.stop();
stateStore.close();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// When two containers request the same PRIVATE resource concurrently, only one
// localizer may download it: the resource's semaphore permit goes to the first
// heartbeating localizer, the second gets an empty spec list, and after the first
// reports a failed fetch neither localizer is given the resource again.
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPrivateResource() throws Exception {
DrainDispatcher dispatcher1=null;
try {
dispatcher1=new DrainDispatcher();
String user="testuser";
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService();
localDirHandler.init(conf);
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService());
dispatcher1.register(LocalizationEventType.class,rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user,appId));
// Both containers request the SAME private resource.
LocalResourceRequest req=new LocalResourceRequest(new Path("file:///tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,"");
// First container: register its LocalizerRunner directly so heartbeats can
// be driven without starting a real localizer process.
ContainerImpl container1=createMockContainer(user,1);
String localizerId1=container1.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1));
LocalizerRunner localizerRunner1=rls.getLocalizerRunner(localizerId1);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PRIVATE,req));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,1,200));
// Second container requests the same resource in parallel.
ContainerImpl container2=createMockContainer(user,2);
String localizerId2=container2.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId2,rls.new LocalizerRunner(new LocalizerContext(user,container2.getContainerId(),null),localizerId2));
LocalizerRunner localizerRunner2=rls.getLocalizerRunner(localizerId2);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PRIVATE,req));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId2,1,200));
LocalResourcesTracker tracker=rls.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId);
LocalizedResource lr=tracker.getLocalizedResource(req);
Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState());
// One permit available: exactly one localizer may claim the download.
Assert.assertEquals(1,lr.sem.availablePermits());
// First heartbeat wins the permit and is handed the resource spec.
LocalizerHeartbeatResponse response1=rls.heartbeat(createLocalizerStatus(localizerId1));
Assert.assertEquals(1,localizerRunner1.scheduled.size());
Assert.assertEquals(req.getResource(),response1.getResourceSpecs().get(0).getResource().getResource());
Assert.assertEquals(0,lr.sem.availablePermits());
// Second heartbeat finds the permit taken and gets nothing to download.
LocalizerHeartbeatResponse response2=rls.heartbeat(createLocalizerStatus(localizerId2));
Assert.assertEquals(0,localizerRunner2.scheduled.size());
Assert.assertEquals(0,response2.getResourceSpecs().size());
// First localizer reports a failed fetch; the resource moves to FAILED and is
// unscheduled everywhere, so neither localizer receives it again.
rls.heartbeat(createLocalizerStatusForFailedResource(localizerId1,req));
Assert.assertTrue(waitForResourceState(lr,rls,req,LocalResourceVisibility.PRIVATE,user,appId,ResourceState.FAILED,200));
Assert.assertTrue(lr.getState().equals(ResourceState.FAILED));
Assert.assertEquals(0,localizerRunner1.scheduled.size());
response2=rls.heartbeat(createLocalizerStatus(localizerId2));
Assert.assertEquals(0,localizerRunner2.scheduled.size());
Assert.assertEquals(0,localizerRunner2.pending.size());
Assert.assertEquals(0,response2.getResourceSpecs().size());
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPublicResource() throws Exception {
DrainDispatcher dispatcher1=null;
String user="testuser";
try {
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1=new DrainDispatcher();
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher1,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
dispatcher1.register(LocalizationEventType.class,spyService);
spyService.init(conf);
Assert.assertEquals(0,spyService.getPublicLocalizer().pending.size());
LocalResourceRequest req=new LocalResourceRequest(new Path("/tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,"");
ApplicationImpl app=mock(ApplicationImpl.class);
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
when(app.getAppId()).thenReturn(appId);
when(app.getUser()).thenReturn(user);
dispatcher1.getEventHandler().handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
ContainerImpl container1=createMockContainer(user,1);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PUBLIC,req));
Assert.assertTrue(waitForResourceState(null,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.DOWNLOADING,200));
Assert.assertTrue(waitForPublicDownloadToStart(spyService,1,200));
LocalizedResource lr=getLocalizedResource(spyService,req,LocalResourceVisibility.PUBLIC,user,null);
Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState());
Assert.assertEquals(1,spyService.getPublicLocalizer().pending.size());
Assert.assertEquals(0,lr.sem.availablePermits());
ContainerImpl container2=createMockContainer(user,2);
dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PUBLIC,req));
Assert.assertFalse(waitForPublicDownloadToStart(spyService,2,100));
ResourceFailedLocalizationEvent locFailedEvent=new ResourceFailedLocalizationEvent(req,new Exception("test").toString());
spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,null).handle(locFailedEvent);
Assert.assertTrue(waitForResourceState(lr,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.FAILED,200));
lr.unlock();
spyService.getPublicLocalizer().pending.clear();
LocalizerResourceRequestEvent localizerEvent=new LocalizerResourceRequestEvent(lr,null,mock(LocalizerContext.class),null);
dispatcher1.getEventHandler().handle(localizerEvent);
Assert.assertFalse(waitForPublicDownloadToStart(spyService,1,100));
Assert.assertEquals(1,lr.sem.availablePermits());
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that heartbeat-assigned destination directories land in the correct
// cache roots: APPLICATION-visibility resources under
// <local-dir>/usercache/<user>/appcache/<appId>/filecache and PRIVATE resources
// under <local-dir>/usercache/<user>/filecache. Drives heartbeats until both
// resource specs have been handed out, then checks each spec's parent directory.
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalResourcePath() throws Exception {
DrainDispatcher dispatcher1=null;
try {
dispatcher1=new DrainDispatcher();
String user="testuser";
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
// Single local dir so the expected cache paths are deterministic.
List localDirs=new ArrayList();
String[] sDirs=new String[1];
for (int i=0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService();
localDirHandler.init(conf);
EventHandler applicationBus=mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService());
dispatcher1.register(LocalizationEventType.class,rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user,appId));
// Register the container's LocalizerRunner directly so heartbeats can be
// driven without spawning a real localizer process.
Container container1=createMockContainer(user,1);
String localizerId1=container1.getContainerId().toString();
rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1));
// One PRIVATE and one APPLICATION resource for the same container.
LocalResourceRequest reqPriv=new LocalResourceRequest(new Path("file:///tmp1"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,"");
List privList=new ArrayList();
privList.add(reqPriv);
LocalResourceRequest reqApp=new LocalResourceRequest(new Path("file:///tmp2"),123L,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,"");
List appList=new ArrayList();
appList.add(reqApp);
Map> rsrcs=new HashMap>();
rsrcs.put(LocalResourceVisibility.APPLICATION,appList);
rsrcs.put(LocalResourceVisibility.PRIVATE,privList);
dispatcher1.getEventHandler().handle(new ContainerLocalizationRequestEvent(container1,rsrcs));
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,2,500));
// Expected parent directories for each visibility.
String userCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.FILECACHE));
String userAppCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.APPCACHE,appId.toString(),ContainerLocalizer.FILECACHE));
// Heartbeat until both resource specs have been returned; each spec's
// destination parent must match the cache root for its visibility.
int returnedResources=0;
boolean appRsrc=false, privRsrc=false;
while (returnedResources < 2) {
LocalizerHeartbeatResponse response=rls.heartbeat(createLocalizerStatus(localizerId1));
for ( ResourceLocalizationSpec resourceSpec : response.getResourceSpecs()) {
returnedResources++;
Path destinationDirectory=new Path(resourceSpec.getDestinationDirectory().getFile());
if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.APPLICATION) {
appRsrc=true;
Assert.assertEquals(userAppCachePath,destinationDirectory.getParent().toUri().toString());
}
else if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.PRIVATE) {
privRsrc=true;
Assert.assertEquals(userCachePath,destinationDirectory.getParent().toUri().toString());
}
else {
// Fixed typo in failure message ("recevied" -> "received").
throw new Exception("Unexpected resource received.");
}
}
}
// Both visibilities must have been observed.
Assert.assertTrue(appRsrc && privRsrc);
}
finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier
// Ensures failures while adding a PUBLIC resource do not kill the dispatcher
// (DISPATCHER_EXIT_ON_ERROR is set, so an unhandled throw would exit): first an
// IOException from getLocalPathForWrite, then a RejectedExecutionException path
// forced by shutting down the public localizer's thread pool. In both cases the
// resource must be cleaned out of the public tracker.
@Test @SuppressWarnings("unchecked") public void testPublicResourceAddResourceExceptions() throws Exception {
List localDirs=new ArrayList();
String[] sDirs=new String[4];
for (int i=0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
// If the dispatcher thread throws, the test JVM would exit -- which is the
// regression this test guards against.
conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY,true);
DrainDispatcher dispatcher=new DrainDispatcher();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
DeletionService delService=mock(DeletionService.class);
// Spy on the dirs handler so getLocalPathForWrite can be made to throw.
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
LocalDirsHandlerService dirsHandlerSpy=spy(dirsHandler);
dirsHandlerSpy.init(conf);
dispatcher.init(conf);
dispatcher.start();
try {
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandlerSpy,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
doReturn(mockServer).when(spyService).createServer();
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
spyService.init(conf);
spyService.start();
final String user="user0";
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
dispatcher.await();
Random r=new Random();
r.setSeed(r.nextLong());
final LocalResource pubResource=getPublicMockedResource(r);
final LocalResourceRequest pubReq=new LocalResourceRequest(pubResource);
Map> req=new HashMap>();
req.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq));
final Container c=getMockContainer(appId,42,user);
// Case 1: path selection throws IOException -> resource must be removed.
Mockito.doThrow(new IOException()).when(dirsHandlerSpy).getLocalPathForWrite(isA(String.class),Mockito.anyLong(),Mockito.anyBoolean());
spyService.handle(new ContainerLocalizationRequestEvent(c,req));
dispatcher.await();
LocalResourcesTracker tracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId);
Assert.assertNull(tracker.getLocalizedResource(pubReq));
// Case 2: restore real path selection, but shut the public localizer's
// thread pool so submit() is rejected -> resource must again be removed.
Mockito.doCallRealMethod().when(dirsHandlerSpy).getLocalPathForWrite(isA(String.class),Mockito.anyLong(),Mockito.anyBoolean());
PublicLocalizer publicLocalizer=spyService.getPublicLocalizer();
publicLocalizer.threadPool.shutdown();
spyService.handle(new ContainerLocalizationRequestEvent(c,req));
dispatcher.await();
tracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId);
Assert.assertNull(tracker.getLocalizedResource(pubReq));
}
finally {
dispatcher.await();
dispatcher.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Drives the full localizer heartbeat protocol for one container with two PRIVATE
// resources: the service starts a localizer (verified via startLocalizer), each
// heartbeat hands out at most one resource spec with the expected destination
// path, an empty work queue yields LIVE with no specs, the next heartbeat yields
// DIE, the container receives two RESOURCE_LOCALIZED events, and the localizer
// token file is deleted afterwards.
@Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalizationHeartbeat() throws Exception {
List localDirs=new ArrayList();
String[] sDirs=new String[1];
localDirs.add(lfs.makeQualified(new Path(basedir,0 + "")));
sDirs[0]=localDirs.get(0).toString();
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
// Small per-directory file limit so destination sub-directory numbering
// ("10", "0/11") is exercised.
conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37");
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
// Real DeletionService wrapped in a spy so the token-file delete can be
// verified at the end.
DeletionService delServiceReal=new DeletionService(exec);
DeletionService delService=spy(delServiceReal);
delService.init(new Configuration());
delService.start();
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
doReturn(mockServer).when(spyService).createServer();
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
try {
spyService.init(conf);
spyService.start();
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn("user0");
when(app.getAppId()).thenReturn(appId);
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
// Matches the APPLICATION_INITED event for this specific app.
ArgumentMatcher matchesAppInit=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ApplicationEvent evt=(ApplicationEvent)o;
return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID();
}
}
;
dispatcher.await();
verify(applicationBus).handle(argThat(matchesAppInit));
// Seed is printed so failures can be reproduced.
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
final Container c=getMockContainer(appId,42,"user0");
// Stub file creation so writing the localizer token file needs no real FS.
FSDataOutputStream out=new FSDataOutputStream(new DataOutputBuffer(),null);
doReturn(out).when(spylfs).createInternal(isA(Path.class),isA(EnumSet.class),isA(FsPermission.class),anyInt(),anyShort(),anyLong(),isA(Progressable.class),isA(ChecksumOpt.class),anyBoolean());
// Two distinct private resources (loop guards against a random duplicate).
final LocalResource resource1=getPrivateMockedResource(r);
LocalResource resource2=null;
do {
resource2=getPrivateMockedResource(r);
}
while (resource2 == null || resource2.equals(resource1));
final LocalResourceRequest req1=new LocalResourceRequest(resource1);
final LocalResourceRequest req2=new LocalResourceRequest(resource2);
Map> rsrcs=new HashMap>();
List privateResourceList=new ArrayList();
privateResourceList.add(req1);
privateResourceList.add(req2);
rsrcs.put(LocalResourceVisibility.PRIVATE,privateResourceList);
spyService.handle(new ContainerLocalizationRequestEvent(c,rsrcs));
Thread.sleep(1000);
dispatcher.await();
// The service must have started a localizer for this container; capture the
// token path so its deletion can be checked later.
String appStr=ConverterUtils.toString(appId);
String ctnrStr=c.getContainerId().toString();
ArgumentCaptor tokenPathCaptor=ArgumentCaptor.forClass(Path.class);
verify(exec).startLocalizer(tokenPathCaptor.capture(),isA(InetSocketAddress.class),eq("user0"),eq(appStr),eq(ctnrStr),isA(List.class),isA(List.class));
Path localizationTokenPath=tokenPathCaptor.getValue();
// Scripted localizer statuses: empty, then success for resource1, then
// success for resource2, then empty again (drives the heartbeat sequence).
LocalResourceStatus rsrcStat1=mock(LocalResourceStatus.class);
LocalResourceStatus rsrcStat2=mock(LocalResourceStatus.class);
LocalizerStatus stat=mock(LocalizerStatus.class);
when(stat.getLocalizerId()).thenReturn(ctnrStr);
when(rsrcStat1.getResource()).thenReturn(resource1);
when(rsrcStat2.getResource()).thenReturn(resource2);
when(rsrcStat1.getLocalSize()).thenReturn(4344L);
when(rsrcStat2.getLocalSize()).thenReturn(2342L);
URL locPath=getPath("/cache/private/blah");
when(rsrcStat1.getLocalPath()).thenReturn(locPath);
when(rsrcStat2.getLocalPath()).thenReturn(locPath);
when(rsrcStat1.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
when(rsrcStat2.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS);
when(stat.getResources()).thenReturn(Collections.emptyList()).thenReturn(Collections.singletonList(rsrcStat1)).thenReturn(Collections.singletonList(rsrcStat2)).thenReturn(Collections.emptyList());
String localPath=Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR+ "user0"+ Path.SEPARATOR+ ContainerLocalizer.FILECACHE;
// Heartbeat 1: LIVE, hands out req1 into .../filecache/10.
LocalizerHeartbeatResponse response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(1,response.getResourceSpecs().size());
assertEquals(req1,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource()));
URL localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory();
assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "10"));
// Heartbeat 2: LIVE, hands out req2 into .../filecache/0/11 (directory
// rolled over because of the per-directory file limit).
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(1,response.getResourceSpecs().size());
assertEquals(req2,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource()));
localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory();
assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "0"+ Path.SEPARATOR+ "11"));
// Heartbeat 3: nothing left to hand out, still LIVE.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.LIVE,response.getLocalizerAction());
assertEquals(0,response.getResourceSpecs().size());
// Heartbeat 4: all work reported complete -> DIE.
response=spyService.heartbeat(stat);
assertEquals(LocalizerAction.DIE,response.getLocalizerAction());
dispatcher.await();
// Two RESOURCE_LOCALIZED events must have reached this container, and the
// localizer token file must have been scheduled for deletion.
ArgumentMatcher matchesContainerLoc=new ArgumentMatcher(){
@Override public boolean matches( Object o){
ContainerEvent evt=(ContainerEvent)o;
return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c.getContainerId() == evt.getContainerID();
}
}
;
verify(containerBus,times(2)).handle(argThat(matchesContainerLoc));
verify(delService).delete((String)isNull(),eq(localizationTokenPath));
}
finally {
spyService.stop();
dispatcher.stop();
delService.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies reference counting in the per-visibility LocalResourcesTrackers:
 * two localization requests from the same container bump the private
 * resource's refcount to 2, and the matching cleanup events drop the counts
 * back down without removing the tracked resources.
 */
@Test @SuppressWarnings("unchecked") public void testResourceRelease() throws Exception {
// Four qualified local dirs wired into NM_LOCAL_DIRS.
List localDirs=new ArrayList();
String[] sDirs=new String[4];
for (int i=0; i < 4; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir,i + "")));
sDirs[i]=localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs);
LocalizerTracker mockLocallilzerTracker=mock(LocalizerTracker.class);
DrainDispatcher dispatcher=new DrainDispatcher();
dispatcher.init(conf);
dispatcher.start();
// Mock event sinks so handled events can be observed without real handlers.
EventHandler applicationBus=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,applicationBus);
EventHandler containerBus=mock(EventHandler.class);
dispatcher.register(ContainerEventType.class,containerBus);
EventHandler localizerBus=mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class,localizerBus);
ContainerExecutor exec=mock(ContainerExecutor.class);
LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService();
dirsHandler.init(conf);
DeletionService delService=new DeletionService(exec);
delService.init(new Configuration());
delService.start();
// Spy the service so server/tracker/FS creation can be stubbed out.
ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService());
ResourceLocalizationService spyService=spy(rawService);
doReturn(mockServer).when(spyService).createServer();
doReturn(mockLocallilzerTracker).when(spyService).createLocalizerTracker(isA(Configuration.class));
doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class));
try {
spyService.init(conf);
spyService.start();
final String user="user0";
final Application app=mock(Application.class);
final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3);
when(app.getUser()).thenReturn(user);
when(app.getAppId()).thenReturn(appId);
// Initialize app resources so the per-app/private/public trackers exist.
spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app));
dispatcher.await();
LocalResourcesTracker appTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user,appId);
LocalResourcesTracker privTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId);
LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId);
final Container c=getMockContainer(appId,42,user);
// Seed is printed so a failing run can be reproduced.
Random r=new Random();
long seed=r.nextLong();
System.out.println("SEED: " + seed);
r.setSeed(seed);
final LocalResource privResource=getPrivateMockedResource(r);
final LocalResourceRequest privReq=new LocalResourceRequest(privResource);
final LocalResource pubResource=getPublicMockedResource(r);
final LocalResourceRequest pubReq=new LocalResourceRequest(pubResource);
final LocalResource pubResource2=getPublicMockedResource(r);
final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2);
final LocalResource appResource=getAppMockedResource(r);
final LocalResourceRequest appReq=new LocalResourceRequest(appResource);
// First request: one resource of each visibility.
Map> req=new HashMap>();
req.put(LocalResourceVisibility.PRIVATE,Collections.singletonList(privReq));
req.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq));
req.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq));
// Second request: same private resource (refcount -> 2) plus a new public one.
Map> req2=new HashMap>();
req2.put(LocalResourceVisibility.PRIVATE,Collections.singletonList(privReq));
req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2));
Set pubRsrcs=new HashSet();
pubRsrcs.add(pubReq);
pubRsrcs.add(pubReq2);
spyService.handle(new ContainerLocalizationRequestEvent(c,req));
spyService.handle(new ContainerLocalizationRequestEvent(c,req2));
dispatcher.await();
// Private resource was requested twice, so its refcount must be 2.
int privRsrcCount=0;
for ( LocalizedResource lr : privTracker) {
privRsrcCount++;
Assert.assertEquals("Incorrect reference count",2,lr.getRefCount());
Assert.assertEquals(privReq,lr.getRequest());
}
Assert.assertEquals(1,privRsrcCount);
// Each public resource was requested once; both must be tracked.
int pubRsrcCount=0;
for ( LocalizedResource lr : pubTracker) {
pubRsrcCount++;
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount());
pubRsrcs.remove(lr.getRequest());
}
Assert.assertEquals(0,pubRsrcs.size());
Assert.assertEquals(2,pubRsrcCount);
int appRsrcCount=0;
for ( LocalizedResource lr : appTracker) {
appRsrcCount++;
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount());
Assert.assertEquals(appReq,lr.getRequest());
}
Assert.assertEquals(1,appRsrcCount);
// Cleanup of the first request releases one reference of each resource
// and must trigger private-localizer cleanup for the container.
spyService.handle(new ContainerLocalizationCleanupEvent(c,req));
verify(mockLocallilzerTracker).cleanupPrivLocalizers("container_314159265358979_0003_01_000042");
// Drop the private entry from req2 so its refcount stays at 1.
req2.remove(LocalResourceVisibility.PRIVATE);
spyService.handle(new ContainerLocalizationCleanupEvent(c,req2));
dispatcher.await();
pubRsrcs.add(pubReq);
pubRsrcs.add(pubReq2);
privRsrcCount=0;
for ( LocalizedResource lr : privTracker) {
privRsrcCount++;
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount());
Assert.assertEquals(privReq,lr.getRequest());
}
Assert.assertEquals(1,privRsrcCount);
// Public and app resources are fully released (refcount 0) but remain tracked.
pubRsrcCount=0;
for ( LocalizedResource lr : pubTracker) {
pubRsrcCount++;
Assert.assertEquals("Incorrect reference count",0,lr.getRefCount());
pubRsrcs.remove(lr.getRequest());
}
Assert.assertEquals(0,pubRsrcs.size());
Assert.assertEquals(2,pubRsrcCount);
appRsrcCount=0;
for ( LocalizedResource lr : appTracker) {
appRsrcCount++;
Assert.assertEquals("Incorrect reference count",0,lr.getRefCount());
Assert.assertEquals(appReq,lr.getRequest());
}
Assert.assertEquals(1,appRsrcCount);
}
finally {
dispatcher.stop();
delService.stop();
}
}
InternalCallVerifier BooleanVerifier
/**
 * Exercises ResourceRetentionSet eviction: four trackers are added to a set
 * with a 10MB retention target, and the total size of resources removed
 * (captured via Mockito) must land in the [10MB, 15MB) window.
 */
@Test public void testRsrcUnused(){
DeletionService deletionService=mock(DeletionService.class);
// Retention target: 10 MB expressed in bytes.
long retentionTarget=10 << 20;
ResourceRetentionSet retentionSet=new ResourceRetentionSet(deletionService,retentionTarget);
// Trackers with differing resource sizes and counts; null user => public.
LocalResourcesTracker publicTracker=createMockTracker(null,3 * 1024 * 1024,2,10,5);
LocalResourcesTracker userTrackerA=createMockTracker("A",1 * 1024 * 1024,4,3,3);
LocalResourcesTracker userTrackerB=createMockTracker("B",4 * 1024 * 1024,1,10,5);
LocalResourcesTracker userTrackerC=createMockTracker("C",2 * 1024 * 1024,3,7,2);
retentionSet.addResources(publicTracker);
retentionSet.addResources(userTrackerA);
retentionSet.addResources(userTrackerB);
retentionSet.addResources(userTrackerC);
long bytesRemoved=0L;
// Capture every resource each tracker was asked to remove; atMost() bounds
// removals by the number of resources each tracker holds.
ArgumentCaptor removedCaptor=ArgumentCaptor.forClass(LocalizedResource.class);
verify(publicTracker,atMost(2)).remove(removedCaptor.capture(),isA(DeletionService.class));
verify(userTrackerA,atMost(4)).remove(removedCaptor.capture(),isA(DeletionService.class));
verify(userTrackerB,atMost(1)).remove(removedCaptor.capture(),isA(DeletionService.class));
verify(userTrackerC,atMost(3)).remove(removedCaptor.capture(),isA(DeletionService.class));
for ( LocalizedResource removed : removedCaptor.getAllValues()) {
bytesRemoved+=removed.getSize();
}
// Enough was evicted to meet the target, but not wastefully more.
assertTrue(bytesRemoved >= 10 * 1024 * 1024);
assertTrue(bytesRemoved < 15 * 1024 * 1024);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that after log aggregation finishes for an application, the local
 * per-container log files and the app's local log dir are deleted, while the
 * aggregated log file exists on the remote (HDFS-style) log dir.
 */
@Test @SuppressWarnings("unchecked") public void testLocalFileDeletionAfterUpload() throws Exception {
// Real (spied) DeletionService so deletions actually happen AND can be verified.
this.delSrvc=new DeletionService(createContainerExecutor());
delSrvc=spy(delSrvc);
this.delSrvc.init(conf);
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
EventHandler appEventHandler=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,appEventHandler);
LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler));
logAggregationService.init(this.conf);
logAggregationService.start();
ApplicationId application1=BuilderUtils.newApplicationId(1234,1);
File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1));
app1LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(application1,1);
ContainerId container11=BuilderUtils.newContainerId(appAttemptId,1);
// Write stdout/stderr/syslog for the container, then finish container and app.
writeContainerLogs(app1LogDir,container11);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
// stop() waits for in-flight aggregators to drain.
logAggregationService.stop();
assertEquals(0,logAggregationService.getNumAggregators());
verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
// The whole local app log dir must have been scheduled for deletion.
verify(delSrvc).delete(eq(user),eq((Path)null),eq(new Path(app1LogDir.getAbsolutePath())));
delSrvc.stop();
String containerIdStr=ConverterUtils.toString(container11);
File containerLogDir=new File(app1LogDir,containerIdStr);
// Every local per-container log file must be gone.
for ( String fileType : new String[]{"stdout","stderr","syslog"}) {
File f=new File(containerLogDir,fileType);
Assert.assertFalse("check " + f,f.exists());
}
Assert.assertFalse(app1LogDir.exists());
// The aggregated log must exist at the remote node log path for this app.
Path logFilePath=logAggregationService.getRemoteNodeLogFileForApp(application1,this.user);
Assert.assertTrue("Log file [" + logFilePath + "] not found",new File(logFilePath.toUri().getPath()).exists());
dispatcher.await();
// Both INITED and FINISHED app events must have been dispatched, in order.
ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)};
checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID");
dispatcher.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * Runs log aggregation for three concurrent applications with different
 * retention policies and verifies which containers' logs end up aggregated:
 * app1 keeps all containers, app2 keeps only the AM (container 1), app3
 * keeps the AM plus failed containers.
 */
@Test @SuppressWarnings("unchecked") public void testMultipleAppsLogAggregation() throws Exception {
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
EventHandler appEventHandler=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,appEventHandler);
LogAggregationService logAggregationService=new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler);
logAggregationService.init(this.conf);
logAggregationService.start();
// App 1: retains logs of ALL containers.
ApplicationId application1=BuilderUtils.newApplicationId(1234,1);
File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1));
app1LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(application1,1);
ContainerId container11=BuilderUtils.newContainerId(appAttemptId1,1);
writeContainerLogs(app1LogDir,container11);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0));
// App 2: retains only the AM container's logs (container id 1).
ApplicationId application2=BuilderUtils.newApplicationId(1234,2);
ApplicationAttemptId appAttemptId2=BuilderUtils.newApplicationAttemptId(application2,1);
File app2LogDir=new File(localLogDir,ConverterUtils.toString(application2));
app2LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application2,this.user,null,ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY,this.acls));
ContainerId container21=BuilderUtils.newContainerId(appAttemptId2,1);
writeContainerLogs(app2LogDir,container21);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container21,0));
ContainerId container12=BuilderUtils.newContainerId(appAttemptId1,2);
writeContainerLogs(app1LogDir,container12);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container12,0));
// App 3: retains the AM container and any container that exited non-zero.
ApplicationId application3=BuilderUtils.newApplicationId(1234,3);
ApplicationAttemptId appAttemptId3=BuilderUtils.newApplicationAttemptId(application3,1);
File app3LogDir=new File(localLogDir,ConverterUtils.toString(application3));
app3LogDir.mkdir();
logAggregationService.handle(new LogHandlerAppStartedEvent(application3,this.user,null,ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,this.acls));
dispatcher.await();
// All three apps must have fired LOG_HANDLING_INITED (any order).
ApplicationEvent expectedInitEvents[]=new ApplicationEvent[]{new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(application2,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(application3,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)};
checkEvents(appEventHandler,expectedInitEvents,false,"getType","getApplicationID");
// Reset so the later check only sees the FINISHED events.
reset(appEventHandler);
// container31 is app3's AM (retained); container32 exits 1 (failed, retained);
// container33 succeeds and is not the AM (dropped).
ContainerId container31=BuilderUtils.newContainerId(appAttemptId3,1);
writeContainerLogs(app3LogDir,container31);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container31,0));
ContainerId container32=BuilderUtils.newContainerId(appAttemptId3,2);
writeContainerLogs(app3LogDir,container32);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container32,1));
// container22 is not app2's AM, so its logs must be dropped.
ContainerId container22=BuilderUtils.newContainerId(appAttemptId2,2);
writeContainerLogs(app2LogDir,container22);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container22,0));
ContainerId container33=BuilderUtils.newContainerId(appAttemptId3,3);
writeContainerLogs(app3LogDir,container33);
logAggregationService.handle(new LogHandlerContainerFinishedEvent(container33,0));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application2));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application3));
logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
logAggregationService.stop();
assertEquals(0,logAggregationService.getNumAggregators());
// Aggregated output must contain exactly the retained containers per policy.
verifyContainerLogs(logAggregationService,application1,new ContainerId[]{container11,container12});
verifyContainerLogs(logAggregationService,application2,new ContainerId[]{container21});
verifyContainerLogs(logAggregationService,application3,new ContainerId[]{container31,container32});
dispatcher.await();
ApplicationEvent[] expectedFinishedEvents=new ApplicationEvent[]{new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),new ApplicationEvent(application2,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),new ApplicationEvent(application3,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)};
checkEvents(appEventHandler,expectedFinishedEvents,false,"getType","getApplicationID");
dispatcher.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that a failure while creating the remote app log directory does
 * not bring down the NodeManager: a LOG_HANDLING_FAILED event is emitted
 * with the thrown exception's diagnostic, file systems are closed, and the
 * service keeps absorbing later container/app events without spawning
 * aggregators.
 */
@Test @SuppressWarnings("unchecked") public void testLogAggregationCreateDirsFailsWithoutKillingNM() throws Exception {
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher dispatcher=createDispatcher();
EventHandler appEventHandler=mock(EventHandler.class);
dispatcher.register(ApplicationEventType.class,appEventHandler);
LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler));
logAggregationService.init(this.conf);
logAggregationService.start();
// BUG FIX: (int)Math.random() always truncates to 0 since Math.random()
// returns a double in [0,1); scale first to get a genuinely varying id.
ApplicationId appId=BuilderUtils.newApplicationId(System.currentTimeMillis(),(int)(Math.random() * 1000));
// Force createAppDir to blow up so the failure path is exercised.
Exception e=new RuntimeException("KABOOM!");
doThrow(e).when(logAggregationService).createAppDir(any(String.class),any(ApplicationId.class),any(UserGroupInformation.class));
logAggregationService.handle(new LogHandlerAppStartedEvent(appId,this.user,null,ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,this.acls));
dispatcher.await();
// The app must be told that log handling failed (with the diagnostic).
ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appId,ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED)};
checkEvents(appEventHandler,expectedEvents,false,"getType","getApplicationID","getDiagnostic");
verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class));
// Subsequent events for unrelated containers/apps must be handled quietly.
logAggregationService.handle(new LogHandlerContainerFinishedEvent(BuilderUtils.newContainerId(4,1,1,1),0));
dispatcher.await();
logAggregationService.handle(new LogHandlerAppFinishedEvent(BuilderUtils.newApplicationId(1,5)));
dispatcher.await();
logAggregationService.stop();
assertEquals(0,logAggregationService.getNumAggregators());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies that after an application finishes, its log aggregator thread
 * cleans itself up: the aggregator count must drop to zero within 20s.
 */
@Test @SuppressWarnings("unchecked") public void testLogAggregatorCleanup() throws Exception {
DeletionService deletionService=mock(DeletionService.class);
LocalDirsHandlerService dirsService=mock(LocalDirsHandlerService.class);
DrainDispatcher eventDispatcher=createDispatcher();
EventHandler applicationEventHandler=mock(EventHandler.class);
eventDispatcher.register(ApplicationEventType.class,applicationEventHandler);
LogAggregationService aggregationService=new LogAggregationService(eventDispatcher,this.context,deletionService,dirsService);
aggregationService.init(this.conf);
aggregationService.start();
ApplicationId appId=BuilderUtils.newApplicationId(1234,1);
// Start and immediately finish the app; the aggregator should then retire.
aggregationService.handle(new LogHandlerAppStartedEvent(appId,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
aggregationService.handle(new LogHandlerAppFinishedEvent(appId));
eventDispatcher.await();
// Poll in 100ms steps for up to 20 seconds.
for (int remainingMs=20 * 1000; remainingMs > 0 && aggregationService.getNumAggregators() > 0; remainingMs-=100) {
Thread.sleep(100);
}
Assert.assertEquals("Log aggregator failed to cleanup!",0,aggregationService.getNumAggregators());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that when an application finishes without any containers having
 * run on this node, no aggregated log file is written remotely, yet the
 * INITED and FINISHED lifecycle events are still dispatched in order.
 */
@Test @SuppressWarnings("unchecked") public void testNoContainerOnNode() throws Exception {
this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath());
this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath());
DrainDispatcher eventDispatcher=createDispatcher();
EventHandler applicationEventHandler=mock(EventHandler.class);
eventDispatcher.register(ApplicationEventType.class,applicationEventHandler);
LogAggregationService aggregationService=new LogAggregationService(eventDispatcher,this.context,this.delSrvc,super.dirsHandler);
aggregationService.init(this.conf);
aggregationService.start();
ApplicationId appId=BuilderUtils.newApplicationId(1234,1);
// Local app log dir exists but never receives any container logs.
File appLogDir=new File(localLogDir,ConverterUtils.toString(appId));
appLogDir.mkdir();
aggregationService.handle(new LogHandlerAppStartedEvent(appId,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
aggregationService.handle(new LogHandlerAppFinishedEvent(appId));
aggregationService.stop();
assertEquals(0,aggregationService.getNumAggregators());
// Nothing to aggregate => no remote log file should exist.
Assert.assertFalse(new File(aggregationService.getRemoteNodeLogFileForApp(appId,this.user).toUri().getPath()).exists());
eventDispatcher.await();
ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appId,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(appId,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)};
// Strict in-order check of the two lifecycle events.
checkEvents(applicationEventHandler,expectedEvents,true,"getType","getApplicationID");
eventDispatcher.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies the service can be stopped cleanly even after the dirs handler
 * throws during app start: stop() must not hang (20s timeout) and no
 * aggregators may be left behind.
 */
@Test(timeout=20000) @SuppressWarnings("unchecked") public void testStopAfterError() throws Exception {
DeletionService deletionService=mock(DeletionService.class);
LocalDirsHandlerService failingDirsService=mock(LocalDirsHandlerService.class);
// Any attempt to look up log dirs explodes.
when(failingDirsService.getLogDirs()).thenThrow(new RuntimeException());
DrainDispatcher eventDispatcher=createDispatcher();
EventHandler applicationEventHandler=mock(EventHandler.class);
eventDispatcher.register(ApplicationEventType.class,applicationEventHandler);
LogAggregationService aggregationService=new LogAggregationService(eventDispatcher,this.context,deletionService,failingDirsService);
aggregationService.init(this.conf);
aggregationService.start();
ApplicationId appId=BuilderUtils.newApplicationId(1234,1);
aggregationService.handle(new LogHandlerAppStartedEvent(appId,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls));
aggregationService.stop();
assertEquals(0,aggregationService.getNumAggregators());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies ContainersMonitorImpl exposes the configured allocations and the
 * pmem/vmem check flags for all four flag combinations. The four originally
 * copy-pasted blocks are factored into a single parameterized helper.
 */
@Test(timeout=20000) public void testContainerMonitorMemFlags(){
// 8192 MB of physical memory; virtual limit is pmem * the 2.1 ratio.
long expPmem=8192 * 1024 * 1024l;
long expVmem=(long)(expPmem * 2.1f);
assertMemFlags(false,false,expPmem,expVmem);
assertMemFlags(true,false,expPmem,expVmem);
assertMemFlags(true,true,expPmem,expVmem);
assertMemFlags(false,true,expPmem,expVmem);
}
/**
 * Builds a fresh monitor configured with the given check flags and asserts
 * the reported allocations and flag states match.
 */
private void assertMemFlags(boolean pmemCheck,boolean vmemCheck,long expPmem,long expVmem){
ContainersMonitor cm=new ContainersMonitorImpl(mock(ContainerExecutor.class),mock(AsyncDispatcher.class),mock(Context.class));
cm.init(getConfForCM(pmemCheck,vmemCheck,8192,2.1f));
assertEquals(expPmem,cm.getPmemAllocatedForContainers());
assertEquals(expVmem,cm.getVmemAllocatedForContainers());
assertEquals(pmemCheck,cm.isPmemCheckEnabled());
assertEquals(vmemCheck,cm.isVmemCheckEnabled());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
/**
 * Test to verify the check for whether a process tree is over limit or not.
 * The first observation of a tree is judged on current memory alone; from the
 * second observation on, "over the ages" (cumulative) memory is considered.
 * @throws IOException if there was a problem setting up the fake procfs directories or
 * files.
 */
@Test public void testProcessTreeLimits() throws IOException {
// Fake /proc laid out under a temp dir; each pid dir gets a stat file.
File procfsRootDir=new File(localDir,"proc");
String[] pids={"100","200","300","400","500","600","700"};
try {
TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir);
TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir,pids);
// Stat fields: pid, name, ppid, pgrp, session, vmem-bytes.
// Trees: 100->{500}, 200->{300,400}, 600->{700}.
TestProcfsBasedProcessTree.ProcessStatInfo[] procs=new TestProcfsBasedProcessTree.ProcessStatInfo[7];
procs[0]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000"});
procs[1]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"200","proc2","1","200","200","200000"});
procs[2]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"300","proc3","200","200","200","300000"});
procs[3]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"400","proc4","200","200","200","400000"});
procs[4]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"500","proc5","100","100","100","1500000"});
procs[5]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"600","proc6","1","600","600","100000"});
procs[6]=new TestProcfsBasedProcessTree.ProcessStatInfo(new String[]{"700","proc7","600","600","600","100000"});
TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir,pids,procs,null);
long limit=700000;
ContainersMonitorImpl test=new ContainersMonitorImpl(null,null,null);
// Tree at 100 totals 1,600,000 > limit even on the first observation.
ProcfsBasedProcessTree pTree=new ProcfsBasedProcessTree("100",procfsRootDir.getAbsolutePath());
pTree.updateProcessTree();
assertTrue("tree rooted at 100 should be over limit " + "after first iteration.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
// Tree at 200 totals 900,000: over limit only once it has aged past the
// first observation (second updateProcessTree call).
pTree=new ProcfsBasedProcessTree("200",procfsRootDir.getAbsolutePath());
pTree.updateProcessTree();
assertFalse("tree rooted at 200 shouldn't be over limit " + "after one iteration.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
pTree.updateProcessTree();
assertTrue("tree rooted at 200 should be over limit after 2 iterations",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
// Tree at 600 totals 200,000: under the limit regardless of age.
pTree=new ProcfsBasedProcessTree("600",procfsRootDir.getAbsolutePath());
pTree.updateProcessTree();
assertFalse("tree rooted at 600 should never be over limit.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
pTree.updateProcessTree();
assertFalse("tree rooted at 600 should never be over limit.",test.isProcessTreeOverLimit(pTree,"dummyId",limit));
}
finally {
// Always tear down the fake procfs tree.
FileUtil.fullyDelete(procfsRootDir);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end check that a container exceeding its (tiny) virtual memory
 * limit is killed by the monitor: exit status is KILLED_EXCEEDED_VMEM, the
 * diagnostics match the expected "running beyond virtual memory limits"
 * message, and the shell process is actually dead. Skipped on platforms
 * without procfs support.
 */
@Test public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException {
if (!ProcfsBasedProcessTree.isAvailable()) {
return;
}
containerManager.start();
// Script writes a marker file with its own pid, then sleeps so the
// monitor has time to observe and kill it.
File scriptFile=new File(tmpDir,"scriptFile.sh");
PrintWriter fileWriter=new PrintWriter(scriptFile);
File processStartFile=new File(tmpDir,"start_file.txt").getAbsoluteFile();
fileWriter.write("\numask 0");
fileWriter.write("\necho Hello World! > " + processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nsleep 15");
fileWriter.close();
ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class);
ApplicationId appId=ApplicationId.newInstance(0,0);
ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1);
ContainerId cId=ContainerId.newInstance(appAttemptId,0);
// Localize the script as a FILE resource for the container.
URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class);
rsrc_alpha.setResource(resource_alpha);
rsrc_alpha.setSize(-1);
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
rsrc_alpha.setType(LocalResourceType.FILE);
rsrc_alpha.setTimestamp(scriptFile.lastModified());
String destinationFile="dest_file";
Map localResources=new HashMap();
localResources.put(destinationFile,rsrc_alpha);
containerLaunchContext.setLocalResources(localResources);
List commands=new ArrayList();
commands.add("/bin/bash");
commands.add(scriptFile.getAbsolutePath());
containerLaunchContext.setCommands(commands);
// Deliberately tiny memory allocation so the sleep process overflows it.
Resource r=BuilderUtils.newResource(8 * 1024 * 1024,1);
ContainerTokenIdentifier containerIdentifier=new ContainerTokenIdentifier(cId,context.getNodeId().toString(),user,r,System.currentTimeMillis() + 120000,123,DUMMY_RM_IDENTIFIER,Priority.newInstance(0),0);
Token containerToken=BuilderUtils.newContainerToken(context.getNodeId(),containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier),containerIdentifier);
StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,containerToken);
List list=new ArrayList();
list.add(scRequest);
StartContainersRequest allRequests=StartContainersRequest.newInstance(list);
containerManager.startContainers(allRequests);
// Wait (up to ~20s) for the script to write its marker file.
int timeoutSecs=0;
while (!processStartFile.exists() && timeoutSecs++ < 20) {
Thread.sleep(1000);
LOG.info("Waiting for process start-file to be created");
}
Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists());
// FIX: close the reader (was leaked); try/finally keeps it Java 6-safe.
String pid;
BufferedReader reader=new BufferedReader(new FileReader(processStartFile));
try {
Assert.assertEquals("Hello World!",reader.readLine());
pid=reader.readLine().trim();
Assert.assertEquals(null,reader.readLine());
}
finally {
reader.close();
}
BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE,60);
List containerIds=new ArrayList();
containerIds.add(cId);
GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds);
ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,containerStatus.getExitStatus());
// Diagnostics must include the usage dump header for the killed container.
String expectedMsgPattern="Container \\[pid=" + pid + ",containerID="+ cId+ "\\] is running beyond virtual memory limits. Current usage: "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. "+ "Killing container.\nDump of the process-tree for "+ cId+ " :\n";
Pattern pat=Pattern.compile(expectedMsgPattern);
Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: "+ containerStatus.getDiagnostics(),true,pat.matcher(containerStatus.getDiagnostics()).find());
// Signal.NULL probes liveness: false means the process is gone.
Assert.assertFalse("Process is still alive!",exec.signalContainer(user,pid,Signal.NULL));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies NM state-store removal of localized resources: removing a
 * resource (whether completed or in-progress, app/private/public scope)
 * erases it from recovered state, while untouched resources survive a
 * store restart.
 */
@Test public void testRemoveLocalizedResource() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// App-scope resource: localize fully, then remove -> state must be empty.
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// Removing an in-progress (started but unfinished) localization also
// leaves an empty recovered state.
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath);
restartStateStore();
verifyEmptyState();
// Public resource #1: completed and kept (null user/app => public scope).
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(789L).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
// Public resource #2: completed then removed.
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
LocalizedResourceProto pubLocalizedProto2=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto2).setLocalPath(pubRsrcLocalPath2.toString()).setSize(7654321L).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto2);
stateStore.removeLocalizedResource(null,null,pubRsrcLocalPath2);
// Private resource: in-progress then removed.
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
stateStore.removeLocalizedResource(user,null,privRsrcLocalPath);
restartStateStore();
// Only public resource #1 should survive; no user-scoped state remains.
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getInProgressResources().isEmpty());
assertEquals(1,pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
Map userResources=state.getUserResources();
assertTrue(userResources.isEmpty());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that container-manager application records survive state-store
// restarts, that finished-application markers are persisted, and that
// removeApplication deletes both the application record and its
// finished-application marker.
@Test public void testApplicationStorage() throws IOException {
// an empty store recovers no applications and no finished markers
RecoveredApplicationsState state=stateStore.loadApplicationsState();
assertTrue(state.getApplications().isEmpty());
assertTrue(state.getFinishedApplications().isEmpty());
// store one application and verify it is recovered after a restart
final ApplicationId appId1=ApplicationId.newInstance(1234,1);
ContainerManagerApplicationProto.Builder builder=ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl)appId1).getProto());
builder.setUser("user1");
ContainerManagerApplicationProto appProto1=builder.build();
stateStore.storeApplication(appId1,appProto1);
restartStateStore();
state=stateStore.loadApplicationsState();
assertEquals(1,state.getApplications().size());
assertEquals(appProto1,state.getApplications().get(0));
assertTrue(state.getFinishedApplications().isEmpty());
// mark app1 finished and add a second application; both records and the
// finished marker for app1 should be recovered
stateStore.storeFinishedApplication(appId1);
final ApplicationId appId2=ApplicationId.newInstance(1234,2);
builder=ContainerManagerApplicationProto.newBuilder();
builder.setId(((ApplicationIdPBImpl)appId2).getProto());
builder.setUser("user2");
ContainerManagerApplicationProto appProto2=builder.build();
stateStore.storeApplication(appId2,appProto2);
restartStateStore();
state=stateStore.loadApplicationsState();
assertEquals(2,state.getApplications().size());
assertTrue(state.getApplications().contains(appProto1));
assertTrue(state.getApplications().contains(appProto2));
assertEquals(1,state.getFinishedApplications().size());
assertEquals(appId1,state.getFinishedApplications().get(0));
// finishing then removing app2 erases both its record and its finished
// marker, leaving only app1 behind
stateStore.storeFinishedApplication(appId2);
stateStore.removeApplication(appId2);
restartStateStore();
state=stateStore.loadApplicationsState();
assertEquals(1,state.getApplications().size());
assertEquals(appProto1,state.getApplications().get(0));
assertEquals(1,state.getFinishedApplications().size());
assertEquals(appId1,state.getFinishedApplications().get(0));
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies persistence of NM token secrets across state-store restarts:
// the current and previous master keys and the per-application-attempt
// master keys are all recovered; a removed attempt key stays removed and
// a re-stored key takes its latest value.
@Test public void testNMTokenStorage() throws IOException {
// an empty store recovers no keys
RecoveredNMTokensState state=stateStore.loadNMTokensState();
assertNull(state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
NMTokenSecretManagerForTest secretMgr=new NMTokenSecretManagerForTest();
// the current master key alone survives a restart
MasterKey currentKey=secretMgr.generateKey();
stateStore.storeNMTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// the previous master key is persisted independently of the current one
MasterKey prevKey=secretMgr.generateKey();
stateStore.storeNMTokenPreviousMasterKey(prevKey);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
assertTrue(state.getApplicationMasterKeys().isEmpty());
// per-attempt master keys are recovered keyed by application attempt id
ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1);
MasterKey attemptKey1=secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt1,attemptKey1);
ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,3),4);
MasterKey attemptKey2=secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
Map loadedAppKeys=state.getApplicationMasterKeys();
assertEquals(2,loadedAppKeys.size());
assertEquals(attemptKey1,loadedAppKeys.get(attempt1));
assertEquals(attemptKey2,loadedAppKeys.get(attempt2));
// mix an add, a removal, an overwrite, and a master-key rollover, then
// check only the latest state is recovered
ApplicationAttemptId attempt3=ApplicationAttemptId.newInstance(ApplicationId.newInstance(5,6),7);
MasterKey attemptKey3=secretMgr.generateKey();
stateStore.storeNMTokenApplicationMasterKey(attempt3,attemptKey3);
stateStore.removeNMTokenApplicationMasterKey(attempt1);
attemptKey2=prevKey;
stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2);
prevKey=currentKey;
stateStore.storeNMTokenPreviousMasterKey(prevKey);
currentKey=secretMgr.generateKey();
stateStore.storeNMTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadNMTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
loadedAppKeys=state.getApplicationMasterKeys();
assertEquals(2,loadedAppKeys.size());
assertNull(loadedAppKeys.get(attempt1));
assertEquals(attemptKey2,loadedAppKeys.get(attempt2));
assertEquals(attemptKey3,loadedAppKeys.get(attempt3));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies persistence of container state across state-store restarts:
 * the start request, lifecycle status (REQUESTED -> LAUNCHED -> COMPLETED),
 * exit code, kill flag, and accumulated diagnostics are all recovered, and
 * removeContainer erases the record entirely.
 */
@Test public void testContainerStorage() throws IOException {
  // an empty store recovers no containers
  List<RecoveredContainerState> recoveredContainers = stateStore.loadContainersState();
  assertTrue(recoveredContainers.isEmpty());
  // build a fully-populated StartContainerRequest for a test container
  ApplicationId appId = ApplicationId.newInstance(1234, 3);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 4);
  ContainerId containerId = ContainerId.newInstance(appAttemptId, 5);
  LocalResource lrsrc = LocalResource.newInstance(URL.newInstance("hdfs", "somehost", 12345, "/some/path/to/rsrc"), LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, 123L, 1234567890L);
  Map<String, LocalResource> localResources = new HashMap<>();
  localResources.put("rsrc", lrsrc);
  Map<String, String> env = new HashMap<>();
  env.put("somevar", "someval");
  List<String> containerCmds = new ArrayList<>();
  containerCmds.add("somecmd");
  containerCmds.add("somearg");
  Map<String, ByteBuffer> serviceData = new HashMap<>();
  serviceData.put("someservice", ByteBuffer.wrap(new byte[]{0x1, 0x2, 0x3}));
  ByteBuffer containerTokens = ByteBuffer.wrap(new byte[]{0x7, 0x8, 0x9, 0xa});
  Map<ApplicationAccessType, String> acls = new HashMap<>();
  acls.put(ApplicationAccessType.VIEW_APP, "viewuser");
  acls.put(ApplicationAccessType.MODIFY_APP, "moduser");
  ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, env, containerCmds, serviceData, containerTokens, acls);
  Resource containerRsrc = Resource.newInstance(1357, 3);
  ContainerTokenIdentifier containerTokenId = new ContainerTokenIdentifier(containerId, "host", "user", containerRsrc, 9876543210L, 42, 2468, Priority.newInstance(7), 13579);
  Token containerToken = Token.newInstance(containerTokenId.getBytes(), ContainerTokenIdentifier.KIND.toString(), "password".getBytes(), "tokenservice");
  StartContainerRequest containerReq = StartContainerRequest.newInstance(clc, containerToken);
  // store the request; after restart the container is REQUESTED with no
  // exit code, not killed, and carries the original start request
  stateStore.storeContainer(containerId, containerReq);
  restartStateStore();
  recoveredContainers = stateStore.loadContainersState();
  assertEquals(1, recoveredContainers.size());
  RecoveredContainerState rcs = recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.REQUESTED, rcs.getStatus());
  assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
  assertFalse(rcs.getKilled());
  assertEquals(containerReq, rcs.getStartRequest());
  assertTrue(rcs.getDiagnostics().isEmpty());
  // mark launched and add diagnostics; both survive a restart
  StringBuilder diags = new StringBuilder();
  stateStore.storeContainerLaunched(containerId);
  diags.append("some diags for container");
  stateStore.storeContainerDiagnostics(containerId, diags);
  restartStateStore();
  recoveredContainers = stateStore.loadContainersState();
  assertEquals(1, recoveredContainers.size());
  rcs = recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
  assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
  assertFalse(rcs.getKilled());
  assertEquals(containerReq, rcs.getStartRequest());
  assertEquals(diags.toString(), rcs.getDiagnostics());
  // mark killed with more diagnostics; status stays LAUNCHED, kill flag set
  diags.append("some more diags for container");
  stateStore.storeContainerDiagnostics(containerId, diags);
  stateStore.storeContainerKilled(containerId);
  restartStateStore();
  recoveredContainers = stateStore.loadContainersState();
  assertEquals(1, recoveredContainers.size());
  rcs = recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.LAUNCHED, rcs.getStatus());
  assertEquals(ContainerExitStatus.INVALID, rcs.getExitCode());
  assertTrue(rcs.getKilled());
  assertEquals(containerReq, rcs.getStartRequest());
  assertEquals(diags.toString(), rcs.getDiagnostics());
  // mark completed with an exit code; recovered as COMPLETED with that code
  diags.append("some final diags");
  stateStore.storeContainerDiagnostics(containerId, diags);
  stateStore.storeContainerCompleted(containerId, 21);
  restartStateStore();
  recoveredContainers = stateStore.loadContainersState();
  assertEquals(1, recoveredContainers.size());
  rcs = recoveredContainers.get(0);
  assertEquals(RecoveredContainerStatus.COMPLETED, rcs.getStatus());
  assertEquals(21, rcs.getExitCode());
  assertTrue(rcs.getKilled());
  assertEquals(containerReq, rcs.getStartRequest());
  assertEquals(diags.toString(), rcs.getDiagnostics());
  // removing the container erases it from recovery
  stateStore.removeContainer(containerId);
  restartStateStore();
  recoveredContainers = stateStore.loadContainersState();
  assertTrue(recoveredContainers.isEmpty());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Version handling: the store reports its own version; a minor-version bump
 * is compatible (readable, and reset to the store's version on restart); a
 * major-version bump is incompatible and must fail the restart.
 */
@Test public void testCheckVersion() throws IOException {
  Version storeVersion = stateStore.getCurrentVersion();
  Assert.assertEquals(storeVersion, stateStore.loadVersion());
  // compatible: same major, higher minor
  Version minorBump = Version.newInstance(storeVersion.getMajorVersion(), storeVersion.getMinorVersion() + 2);
  stateStore.storeVersion(minorBump);
  Assert.assertEquals(minorBump, stateStore.loadVersion());
  restartStateStore();
  // restart rewrote the stored version back to the store's own version
  Assert.assertEquals(storeVersion, stateStore.loadVersion());
  // incompatible: higher major version
  Version majorBump = Version.newInstance(storeVersion.getMajorVersion() + 1, storeVersion.getMinorVersion());
  stateStore.storeVersion(majorBump);
  try {
    restartStateStore();
    Assert.fail("Incompatible version, should expect fail here.");
  } catch (ServiceStateException e) {
    Assert.assertTrue("Exception message mismatch", e.getMessage().contains("Incompatible version for NM state:"));
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies deletion-service tasks survive state-store restarts and that
 * removing tasks one at a time leaves only the remaining ones recoverable.
 */
@Test public void testDeletionTaskStorage() throws IOException {
  // an empty store recovers no tasks
  RecoveredDeletionServiceState recovered = stateStore.loadDeletionServiceState();
  assertTrue(recovered.getTasks().isEmpty());
  // a fully-populated task is recovered intact after a restart
  DeletionServiceDeleteTaskProto task1 = DeletionServiceDeleteTaskProto.newBuilder()
      .setId(7)
      .setUser("someuser")
      .setSubdir("some/subdir")
      .addBasedirs("some/dir/path")
      .addBasedirs("some/other/dir/path")
      .setDeletionTime(123456L)
      .addSuccessorIds(8)
      .addSuccessorIds(9)
      .build();
  stateStore.storeDeletionTask(task1.getId(), task1);
  restartStateStore();
  recovered = stateStore.loadDeletionServiceState();
  assertEquals(1, recovered.getTasks().size());
  assertEquals(task1, recovered.getTasks().get(0));
  // a second, minimally-populated task is recovered alongside the first
  DeletionServiceDeleteTaskProto task2 = DeletionServiceDeleteTaskProto.newBuilder()
      .setId(8)
      .setUser("user2")
      .setSubdir("subdir2")
      .setDeletionTime(789L)
      .build();
  stateStore.storeDeletionTask(task2.getId(), task2);
  restartStateStore();
  recovered = stateStore.loadDeletionServiceState();
  assertEquals(2, recovered.getTasks().size());
  assertTrue(recovered.getTasks().contains(task1));
  assertTrue(recovered.getTasks().contains(task2));
  // removing the second task leaves only the first
  stateStore.removeDeletionTask(task2.getId());
  restartStateStore();
  recovered = stateStore.loadDeletionServiceState();
  assertEquals(1, recovered.getTasks().size());
  assertEquals(task1, recovered.getTasks().get(0));
  // removing the last task empties the store
  stateStore.removeDeletionTask(task1.getId());
  restartStateStore();
  recovered = stateStore.loadDeletionServiceState();
  assertTrue(recovered.getTasks().isEmpty());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that in-progress resource localizations are persisted in the
// correct tracker (application-scoped, user-private, or public) and are
// recovered intact after state-store restarts.
@Test public void testStartResourceLocalization() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// start an application-scoped localization; after restart it appears as
// in-progress under the user's per-app tracker only
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
restartStateStore();
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
Map userResources=state.getUserResources();
assertEquals(1,userResources.size());
RecoveredUserResources rur=userResources.get(user);
LocalResourceTrackerState privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1,appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
// start two public localizations (null user/app) and one user-private
// localization (user but null app)
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
restartStateStore();
// after restart: public tracker holds both public in-progress entries,
// the private tracker holds the private one, and the app tracker is
// unchanged from before
state=stateStore.loadLocalizationState();
pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertEquals(2,pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath1,pubts.getInProgressResources().get(pubRsrcProto1));
assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
userResources=state.getUserResources();
assertEquals(1,userResources.size());
rur=userResources.get(user);
privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertEquals(1,privts.getInProgressResources().size());
assertEquals(privRsrcLocalPath,privts.getInProgressResources().get(privRsrcProto));
assertEquals(1,rur.getAppTrackerStates().size());
appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getLocalizedResources().isEmpty());
assertEquals(1,appts.getInProgressResources().size());
assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that finishing a localization moves the resource from the
// in-progress set to the localized set of its tracker (app-scoped,
// user-private, or public), and that unfinished localizations remain
// in-progress across state-store restarts.
@Test public void testFinishResourceLocalization() throws IOException {
String user="somebody";
ApplicationId appId=ApplicationId.newInstance(1,1);
// start and finish an application-scoped localization; after restart it
// is recovered as localized (not in-progress) under the app tracker
Path appRsrcPath=new Path("hdfs://some/app/resource");
LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L);
LocalResourceProto appRsrcProto=rsrcPb.getProto();
Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc");
stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath);
LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build();
stateStore.finishResourceLocalization(user,appId,appLocalizedProto);
restartStateStore();
RecoveredLocalizationState state=stateStore.loadLocalizationState();
LocalResourceTrackerState pubts=state.getPublicTrackerState();
assertTrue(pubts.getLocalizedResources().isEmpty());
assertTrue(pubts.getInProgressResources().isEmpty());
Map userResources=state.getUserResources();
assertEquals(1,userResources.size());
RecoveredUserResources rur=userResources.get(user);
LocalResourceTrackerState privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertTrue(privts.getLocalizedResources().isEmpty());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1,appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
// start two public localizations and one user-private localization, then
// finish only the first public one and the private one
Path pubRsrcPath1=new Path("hdfs://some/public/resource1");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto1=rsrcPb.getProto();
Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1");
stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1);
Path pubRsrcPath2=new Path("hdfs://some/public/resource2");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L);
LocalResourceProto pubRsrcProto2=rsrcPb.getProto();
Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2");
stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2);
Path privRsrcPath=new Path("hdfs://some/private/resource");
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*");
LocalResourceProto privRsrcProto=rsrcPb.getProto();
Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc");
stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath);
LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(pubRsrcProto1.getSize()).build();
stateStore.finishResourceLocalization(null,null,pubLocalizedProto1);
LocalizedResourceProto privLocalizedProto=LocalizedResourceProto.newBuilder().setResource(privRsrcProto).setLocalPath(privRsrcLocalPath.toString()).setSize(privRsrcProto.getSize()).build();
stateStore.finishResourceLocalization(user,null,privLocalizedProto);
restartStateStore();
// after restart: the finished public resource is localized while the
// second stays in-progress; the private resource is localized; the app
// tracker is unchanged
state=stateStore.loadLocalizationState();
pubts=state.getPublicTrackerState();
assertEquals(1,pubts.getLocalizedResources().size());
assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next());
assertEquals(1,pubts.getInProgressResources().size());
assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2));
userResources=state.getUserResources();
assertEquals(1,userResources.size());
rur=userResources.get(user);
privts=rur.getPrivateTrackerState();
assertNotNull(privts);
assertEquals(1,privts.getLocalizedResources().size());
assertEquals(privLocalizedProto,privts.getLocalizedResources().iterator().next());
assertTrue(privts.getInProgressResources().isEmpty());
assertEquals(1,rur.getAppTrackerStates().size());
appts=rur.getAppTrackerStates().get(appId);
assertNotNull(appts);
assertTrue(appts.getInProgressResources().isEmpty());
assertEquals(1,appts.getLocalizedResources().size());
assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies persistence of container token secrets across state-store
// restarts: current/previous master keys and per-container token
// expiration times are recovered; a removed token stays removed and a
// re-stored token takes its latest expiration time.
@Test public void testContainerTokenStorage() throws IOException {
// an empty store recovers no keys and no active tokens
RecoveredContainerTokensState state=stateStore.loadContainerTokensState();
assertNull(state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(new YarnConfiguration());
// the current master key alone survives a restart
MasterKey currentKey=keygen.generateKey();
stateStore.storeContainerTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertNull(state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// the previous master key is persisted independently
MasterKey prevKey=keygen.generateKey();
stateStore.storeContainerTokenPreviousMasterKey(prevKey);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
assertTrue(state.getActiveTokens().isEmpty());
// active tokens are recovered keyed by container id with their
// expiration times
ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
Long expTime1=1234567890L;
ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
Long expTime2=9876543210L;
stateStore.storeContainerToken(cid1,expTime1);
stateStore.storeContainerToken(cid2,expTime2);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
Map loadedActiveTokens=state.getActiveTokens();
assertEquals(2,loadedActiveTokens.size());
assertEquals(expTime1,loadedActiveTokens.get(cid1));
assertEquals(expTime2,loadedActiveTokens.get(cid2));
// mix an add, a removal, an expiration update, and a master-key
// rollover, then check only the latest state is recovered
ContainerId cid3=BuilderUtils.newContainerId(3,3,3,3);
Long expTime3=135798642L;
stateStore.storeContainerToken(cid3,expTime3);
stateStore.removeContainerToken(cid1);
expTime2+=246897531L;
stateStore.storeContainerToken(cid2,expTime2);
prevKey=currentKey;
stateStore.storeContainerTokenPreviousMasterKey(prevKey);
currentKey=keygen.generateKey();
stateStore.storeContainerTokenCurrentMasterKey(currentKey);
restartStateStore();
state=stateStore.loadContainerTokensState();
assertEquals(currentKey,state.getCurrentMasterKey());
assertEquals(prevKey,state.getPreviousMasterKey());
loadedActiveTokens=state.getActiveTokens();
assertEquals(2,loadedActiveTokens.size());
assertNull(loadedActiveTokens.get(cid1));
assertEquals(expTime2,loadedActiveTokens.get(cid2));
assertEquals(expTime3,loadedActiveTokens.get(cid3));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies NMContainerTokenSecretManager recovery from the state store:
// after re-creating the manager and calling recover(), the master key,
// token passwords, and start-container-request validity match the
// pre-restart state; once a container starts and the master key rolls
// twice, its token becomes invalid for both starting and retrieval.
@Test public void testRecovery() throws IOException {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
final NodeId nodeId=NodeId.newInstance("somehost",1234);
final ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1);
final ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2);
ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(conf);
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
// create two container tokens under the initial master key
NMContainerTokenSecretManager secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
ContainerTokenIdentifier tokenId1=createContainerTokenId(cid1,nodeId,"user1",secretMgr);
ContainerTokenIdentifier tokenId2=createContainerTokenId(cid2,nodeId,"user2",secretMgr);
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// recover into a fresh manager: key and both tokens remain usable
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertTrue(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// start container 2 and roll the master key once, then recover:
// token 2 can no longer start a container but both passwords still
// resolve (old key retained as previous)
secretMgr.startContainerSuccessful(tokenId2);
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
assertNotNull(secretMgr.retrievePassword(tokenId1));
assertNotNull(secretMgr.retrievePassword(tokenId2));
// roll the key a second time and recover: the original key is gone, so
// password retrieval for both tokens must now fail
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMContainerTokenSecretManager(conf,stateStore);
secretMgr.setNodeId(nodeId);
secretMgr.recover();
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isValidStartContainerRequest(tokenId1));
assertFalse(secretMgr.isValidStartContainerRequest(tokenId2));
try {
secretMgr.retrievePassword(tokenId1);
fail("token should not be valid");
}
catch ( InvalidToken e) {
// expected: token signed with a discarded master key
}
try {
secretMgr.retrievePassword(tokenId2);
fail("token should not be valid");
}
catch ( InvalidToken e) {
// expected: token signed with a discarded master key
}
stateStore.close();
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies NMTokenSecretManagerInNM recovery from the state store: after
// re-creating the manager and calling recover(), the master key and the
// per-attempt NM token keys match the pre-restart state; once an
// application finishes, its attempt's token becomes invalid after the
// next recovery.
@Test public void testRecovery() throws IOException {
YarnConfiguration conf=new YarnConfiguration();
conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true);
final NodeId nodeId=NodeId.newInstance("somehost",1234);
final ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1);
final ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,2),2);
NMTokenKeyGeneratorForTest keygen=new NMTokenKeyGeneratorForTest();
NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService();
stateStore.init(conf);
stateStore.start();
// register two attempts with NM tokens under the initial master key
NMTokenSecretManagerInNM secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.setNodeId(nodeId);
MasterKey currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
NMTokenIdentifier attemptToken1=getNMTokenId(secretMgr.createNMToken(attempt1,nodeId,"user1"));
NMTokenIdentifier attemptToken2=getNMTokenId(secretMgr.createNMToken(attempt2,nodeId,"user2"));
secretMgr.appAttemptStartContainer(attemptToken1);
secretMgr.appAttemptStartContainer(attemptToken2);
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// recover into a fresh manager: key and both attempt tokens still valid
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// roll the key and finish app1, then recover: attempt1's key is gone but
// its token password still resolves (signed by a retained key)
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr.appFinished(attempt1.getApplicationId());
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
assertNotNull(secretMgr.retrievePassword(attemptToken1));
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// roll the key again and recover: attempt1's token is now fully invalid
// while attempt2's remains usable
currentKey=keygen.generateKey();
secretMgr.setMasterKey(currentKey);
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
secretMgr.retrievePassword(attemptToken1);
fail("attempt token should not still be valid");
}
catch ( InvalidToken e) {
// expected: attempt1's key was removed when its app finished
}
assertNotNull(secretMgr.retrievePassword(attemptToken2));
// finish app2 as well and recover: both tokens are invalid
secretMgr.appFinished(attempt2.getApplicationId());
secretMgr=new NMTokenSecretManagerInNM(stateStore);
secretMgr.recover();
secretMgr.setNodeId(nodeId);
assertEquals(currentKey,secretMgr.getCurrentKey());
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1));
assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2));
try {
secretMgr.retrievePassword(attemptToken1);
fail("attempt token should not still be valid");
}
catch ( InvalidToken e) {
// expected: no key remains for the finished application
}
try {
secretMgr.retrievePassword(attemptToken2);
fail("attempt token should not still be valid");
}
catch ( InvalidToken e) {
// expected: no key remains for the finished application
}
stateStore.close();
}
InternalCallVerifier BooleanVerifier
/**
 * deleteCgroup succeeds for an existing (empty) path, and fails for a
 * nonexistent path once the mock clock is advanced past the configured
 * cgroup-delete timeout by a background thread.
 */
@Test public void testDeleteCgroup() throws Exception {
  final MockClock clock=new MockClock();
  clock.time=System.currentTimeMillis();
  CgroupsLCEResourcesHandler handler=new CgroupsLCEResourcesHandler();
  handler.setConf(new YarnConfiguration());
  handler.initConfig();
  handler.clock=clock;
  // successful delete: create an empty file standing in for the cgroup
  // entry; try-with-resources guarantees the stream is closed
  File file=new File("target",UUID.randomUUID().toString());
  try (FileOutputStream fos=new FileOutputStream(file)) {
    // nothing to write; we only need the file to exist
  }
  Assert.assertTrue(handler.deleteCgroup(file.getPath()));
  // failed delete: a helper thread advances the mock clock past the
  // delete timeout while deleteCgroup retries on a nonexistent path
  final CountDownLatch latch=new CountDownLatch(1);
  new Thread(){
    @Override public void run(){
      latch.countDown();
      try {
        Thread.sleep(200);
      }
      catch ( InterruptedException ex) {
        // best-effort delay only; proceed to advance the clock anyway
      }
      clock.time+=YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT;
    }
  }
  .start();
  latch.await();
  file=new File("target",UUID.randomUUID().toString());
  Assert.assertFalse(handler.deleteCgroup(file.getPath()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Container log dirs resolved via ContainerLogsUtils must be plain local
 * paths (no "file:" scheme), both while the container is in the NM context
 * and after it has been removed.
 */
@Test(timeout=30000) public void testContainerLogDirs() throws IOException, YarnException {
  File absLogDir=new File("target",TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile();
  String logdirwithFile=absLogDir.toURI().toString();
  Configuration conf=new Configuration();
  conf.set(YarnConfiguration.NM_LOG_DIRS,logdirwithFile);
  NodeHealthCheckerService healthChecker=new NodeHealthCheckerService();
  healthChecker.init(conf);
  LocalDirsHandlerService dirsHandler=healthChecker.getDiskHandler();
  NMContext nmContext=new NodeManager.NMContext(null,null,dirsHandler,new ApplicationACLsManager(conf),new NMNullStateStoreService());
  RecordFactory recordFactory=RecordFactoryProvider.getRecordFactory(conf);
  String user="nobody";
  long clusterTimeStamp=1234;
  ApplicationId appId=BuilderUtils.newApplicationId(recordFactory,clusterTimeStamp,1);
  // register a mocked application and a running mock container in the context
  Application app=mock(Application.class);
  when(app.getUser()).thenReturn(user);
  when(app.getAppId()).thenReturn(appId);
  ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
  ContainerId container1=BuilderUtils.newContainerId(recordFactory,appId,appAttemptId,0);
  nmContext.getApplications().put(appId,app);
  MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),conf,user,appId,1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(container1,container);
  // while the container exists, the resolved log dir is a plain local path
  List files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext);
  Assert.assertFalse(files.get(0).toString().contains("file:"));
  // after the container is removed, log dirs still resolve and stay local
  nmContext.getContainers().remove(container1);
  Assert.assertNull(nmContext.getContainers().get(container1));
  files=ContainerLogsUtils.getContainerLogDirs(container1,user,nmContext);
  Assert.assertFalse(files.get(0).toString().contains("file:"));
}
InternalCallVerifier EqualityVerifier
/** GET ws/v1/node/info as JSON and validate the node info payload. */
@Test public void testNodeInfo() throws JSONException, Exception {
  ClientResponse response = resource()
      .path("ws").path("v1").path("node").path("info")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  verifyNodeInfo(response.getEntity(JSONObject.class));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A GET against the web-app root (no ws/v1/node path) must fail with 404,
 * and no response body may have been received before the failure.
 */
@Test public void testInvalidUri2() throws JSONException, Exception {
  String responseStr = "";
  try {
    responseStr = resource().accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", responseStr);
  }
}
InternalCallVerifier EqualityVerifier
// Same as testNodeInfo but with a trailing slash on "info/": the resource
// must still be matched and return the same JSON payload.
@Test public void testNodeInfoSlash() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").path("info/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  verifyNodeInfo(body);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Request node info as XML and validate that exactly one <nodeInfo> element
// is returned, then check its field contents via verifyNodesXML.
@Test public void testSingleNodesXML() throws JSONException, Exception {
WebResource r=resource();
// Trailing slash on "info/" is deliberate: the resource must still match.
ClientResponse response=r.path("ws").path("v1").path("node").path("info/").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse the raw XML with the JDK DOM parser rather than trusting the entity
// mapping, so the wire format itself is what gets validated.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("nodeInfo");
assertEquals("incorrect number of elements",1,nodes.getLength());
verifyNodesXML(nodes);
}
InternalCallVerifier EqualityVerifier
// GET /ws/v1/node with no Accept header: the service must default to JSON.
@Test public void testNodeDefault() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  verifyNodeInfo(body);
}
InternalCallVerifier EqualityVerifier
// GET /ws/v1/node with an explicit JSON Accept header; the node root
// resource returns the same node-info payload as /node/info.
@Test public void testNode() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  verifyNodeInfo(body);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A GET against a bogus sub-path of /ws/v1/node must 404, with no
// response body produced before the failure.
@Test public void testInvalidUri() throws JSONException, Exception {
  WebResource root=resource();
  String body="";
  try {
    body=root.path("ws").path("v1").path("node").path("bogus").accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse=ue.getResponse();
    assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
InternalCallVerifier EqualityVerifier
// GET /ws/v1/node/info with no Accept header: JSON must be the default.
@Test public void testNodeInfoDefault() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").path("info").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  verifyNodeInfo(body);
}
InternalCallVerifier EqualityVerifier
// Trailing slash on "node/" must still resolve to the node root resource.
@Test public void testNodeSlash() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  verifyNodeInfo(body);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Exercises GET /ws/v1/node/containerlogs/{containerid}/{filename}:
//  1) an existing log file of a RUNNING container is returned verbatim,
//  2) an unknown filename yields 404 with an explanatory message,
//  3) after the container is removed from the NM context the file is still
//     served, since the log remains on local disk.
@Test public void testContainerLogs() throws IOException {
  WebResource r=resource();
  final ContainerId containerId=BuilderUtils.newContainerId(0,0,0,0);
  // Reuse the id built above instead of constructing an identical one again.
  final String containerIdStr=containerId.toString();
  final ApplicationAttemptId appAttemptId=containerId.getApplicationAttemptId();
  final ApplicationId appId=appAttemptId.getApplicationId();
  final String appIdStr=appId.toString();
  final String filename="logfile1";
  final String logMessage="log message\n";
  nmContext.getApplications().put(appId,new ApplicationImpl(null,"user",appId,null,nmContext));
  MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),new Configuration(),"user",appId,1);
  container.setState(ContainerState.RUNNING);
  nmContext.getContainers().put(containerId,container);
  // Write the log file into the NM's local log dir for this container.
  Path path=dirsHandler.getLogPathForWrite(ContainerLaunch.getRelativeContainerLogDir(appIdStr,containerIdStr) + "/" + filename,false);
  File logFile=new File(path.toUri().getPath());
  logFile.deleteOnExit();
  assertTrue("Failed to create log dir",logFile.getParentFile().mkdirs());
  // Close the writer in a finally block so the handle is not leaked (and the
  // content is flushed) even if print() throws.
  PrintWriter pw=new PrintWriter(logFile);
  try {
    pw.print(logMessage);
  } finally {
    pw.close();
  }
  // 1) Existing file for a tracked container is returned as-is.
  ClientResponse response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  String responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
  // 2) Nonexistent filename -> 404 with a "cannot find" message.
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path("uhhh").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  Assert.assertEquals(Status.NOT_FOUND.getStatusCode(),response.getStatus());
  responseText=response.getEntity(String.class);
  assertTrue(responseText.contains("Cannot find this log on the local disk."));
  // 3) Drop the container from the context; the on-disk log must still serve.
  nmContext.getContainers().remove(containerId);
  Assert.assertNull(nmContext.getContainers().get(containerId));
  response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
  responseText=response.getEntity(String.class);
  assertEquals(logMessage,responseText);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Requesting /ws/v1/node with an unsupported Accept type (text/plain) must
// fail server-side; the service surfaces this as a 500, not a 406.
@Test public void testInvalidAccept() throws JSONException, Exception {
  WebResource root=resource();
  String body="";
  try {
    body=root.path("ws").path("v1").path("node").accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse=ue.getResponse();
    assertEquals(Status.INTERNAL_SERVER_ERROR,errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",body);
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Looking up an application id that is not registered with the NM context
// must produce a 404 whose JSON RemoteException body names the missing app.
@Test public void testNodeSingleAppsMissing() throws JSONException, Exception {
WebResource r=resource();
// Register two real apps so the miss is not due to an empty context.
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
// application_1234_0009 exists nowhere in the context.
r.path("ws").path("v1").path("node").path("apps").path("application_1234_0009").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id application_1234_0009 not found",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
}
InternalCallVerifier EqualityVerifier
// Filtering apps by a state no registered app is in (INITING) must return
// a JSON null for "apps" rather than an empty array.
@Test public void testNodeAppsStateNone() throws JSONException, Exception {
  WebResource webResource=resource();
  Application firstApp=new MockApp(1);
  nmContext.getApplications().put(firstApp.getAppId(),firstApp);
  addAppContainers(firstApp);
  Application secondApp=new MockApp("foo",1234,2);
  nmContext.getApplications().put(secondApp.getAppId(),secondApp);
  addAppContainers(secondApp);
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").path("apps").queryParam("state",ApplicationState.INITING.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  assertEquals("apps is not null",JSONObject.NULL,body.get("apps"));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Querying apps with an empty "user" parameter must be rejected with a 400
// whose JSON RemoteException body explains the bad request.
@Test public void testNodeAppsUserEmpty() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp("foo",1234,2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
// Empty user string is invalid input, not merely a filter with no matches.
r.path("ws").path("v1").path("node").path("apps").queryParam("user","").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: Error: You must specify a non-empty string for the user",message);
WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// List all apps as XML: with two apps registered, the document must contain
// exactly two <app> elements.
@Test public void testNodeAppsXML() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
ClientResponse response=r.path("ws").path("v1").path("node").path("apps").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse the raw XML with the JDK DOM parser and count <app> elements.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("app");
assertEquals("incorrect number of elements",2,nodes.getLength());
}
InternalCallVerifier EqualityVerifier
// With no applications registered, /ws/v1/node/apps must return a JSON
// null for "apps" rather than an empty array.
@Test public void testNodeAppsNone() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").path("apps").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  assertEquals("apps isn't NULL",JSONObject.NULL,body.get("apps"));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// An unknown "state" value must yield a 400; this variant sends no Accept
// header, so the error body must still default to JSON.
@Test public void testNodeAppsStateInvalidDefault() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp("foo",1234,2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
// FOO_STATE is not a valid ApplicationState constant.
r.path("ws").path("v1").path("node").path("apps").queryParam("state","FOO_STATE").get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
verifyStateInvalidException(message,type,classname);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// An unknown "state" value must yield a 400 with a JSON RemoteException
// body; this variant explicitly requests JSON.
@Test public void testNodeAppsStateInvalid() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp("foo",1234,2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
// FOO_STATE is not a valid ApplicationState constant.
r.path("ws").path("v1").path("node").path("apps").queryParam("state","FOO_STATE").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
verifyStateInvalidException(message,type,classname);
}
}
InternalCallVerifier EqualityVerifier
// Filtering by a user who owns no registered apps must return a JSON null
// for "apps" rather than an empty array.
@Test public void testNodeAppsUserNone() throws JSONException, Exception {
  WebResource webResource=resource();
  Application firstApp=new MockApp(1);
  nmContext.getApplications().put(firstApp.getAppId(),firstApp);
  addAppContainers(firstApp);
  Application secondApp=new MockApp("foo",1234,2);
  nmContext.getApplications().put(secondApp.getAppId(),secondApp);
  addAppContainers(secondApp);
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").path("apps").queryParam("user","george").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  assertEquals("apps is not null",JSONObject.NULL,body.get("apps"));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A malformed application id ("app_foo_0000") must produce a 400; the
// non-numeric cluster timestamp surfaces as a NumberFormatException.
@Test public void testNodeSingleAppsInvalid() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("apps").path("app_foo_0000").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","For input string: \"foo\"",message);
WebServicesTestUtils.checkStringMatch("exception type","NumberFormatException",type);
WebServicesTestUtils.checkStringMatch("exception classname","java.lang.NumberFormatException",classname);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// An unknown "state" value with an XML Accept header must yield a 400 whose
// error body is an XML <RemoteException> document.
@Test public void testNodeAppsStateInvalidXML() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp("foo",1234,2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
// FOO_STATE is not a valid ApplicationState constant.
r.path("ws").path("v1").path("node").path("apps").queryParam("state","FOO_STATE").accept(MediaType.APPLICATION_XML).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String msg=response.getEntity(String.class);
// Parse the XML error body and pull the three RemoteException fields out.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(msg));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("RemoteException");
Element element=(Element)nodes.item(0);
String message=WebServicesTestUtils.getXmlString(element,"message");
String type=WebServicesTestUtils.getXmlString(element,"exception");
String classname=WebServicesTestUtils.getXmlString(element,"javaClassName");
verifyStateInvalidException(message,type,classname);
}
}
InternalCallVerifier EqualityVerifier
// Fetch a single app by id with a trailing slash appended; the resource must
// still match and return only that app's info.
@Test public void testNodeSingleAppsSlash() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
// hash maps the app's container ids for verification below.
HashMap hash=addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
ClientResponse response=r.path("ws").path("v1").path("node").path("apps").path(app.getAppId().toString() + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
verifyNodeAppInfo(json.getJSONObject("app"),app,hash);
}
InternalCallVerifier EqualityVerifier
// Filtering by state=RUNNING must return exactly the one app moved to
// RUNNING, and none of the others.
@Test public void testNodeAppsState() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
// Declared as MockApp (not Application) so setState is available below.
MockApp app2=new MockApp("foo",1234,2);
nmContext.getApplications().put(app2.getAppId(),app2);
HashMap hash2=addAppContainers(app2);
app2.setState(ApplicationState.RUNNING);
ClientResponse response=r.path("ws").path("v1").path("node").path("apps").queryParam("state",ApplicationState.RUNNING.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
JSONObject info=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,info.length());
JSONArray appInfo=info.getJSONArray("app");
// Only app2 is RUNNING, so the filtered list has a single entry.
assertEquals("incorrect number of elements",1,appInfo.length());
verifyNodeAppInfo(appInfo.getJSONObject(0),app2,hash2);
}
InternalCallVerifier EqualityVerifier
// Filtering by user must return only that user's app. NOTE(review): this
// presumes MockApp(1)'s default owner is "mockUser" while app2's is "foo" —
// confirm against the MockApp constructor.
@Test public void testNodeAppsUser() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
HashMap hash=addAppContainers(app);
Application app2=new MockApp("foo",1234,2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
ClientResponse response=r.path("ws").path("v1").path("node").path("apps").queryParam("user","mockUser").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
JSONObject info=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,info.length());
JSONArray appInfo=info.getJSONArray("app");
assertEquals("incorrect number of elements",1,appInfo.length());
verifyNodeAppInfo(appInfo.getJSONObject(0),app,hash);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Fetch a single app by id (trailing slash included) as XML and validate
// that exactly one <app> element with the expected contents is returned.
@Test public void testNodeSingleAppsXML() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
// hash maps the app's container ids for verification below.
HashMap hash=addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
ClientResponse response=r.path("ws").path("v1").path("node").path("apps").path(app.getAppId().toString() + "/").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse the raw XML with the JDK DOM parser and verify the single element.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("app");
assertEquals("incorrect number of elements",1,nodes.getLength());
verifyNodeAppInfoXML(nodes,app,hash);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A container id with too few parts ("container_foo_1234") must produce a
// 400 whose JSON RemoteException body flags the invalid id.
@Test public void testSingleContainerInvalid() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("containers").path("container_foo_1234").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: invalid container id, container_foo_1234",message);
WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A well-formed container id that is not registered with the NM must yield
// a 404 (not-found), distinguishing it from the malformed-id 400 cases.
@Test public void testSingleContainerWrong() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
// Syntactically valid id, but no such container exists in the context.
r.path("ws").path("v1").path("node").path("containers").path("container_1234_0001_01_000005").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: container with id, container_1234_0001_01_000005, not found",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// A truncated container id ("container_1234_0001", missing the attempt and
// container fields) must produce a 400 flagging the invalid id.
@Test public void testSingleContainerInvalid2() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
try {
r.path("ws").path("v1").path("node").path("containers").path("container_1234_0001").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid user query");
}
catch ( UniformInterfaceException ue) {
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// RemoteException carries exactly message, exception and javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: invalid container id, container_1234_0001",message);
WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname);
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// For each container id registered for the first app, fetch it individually
// as XML and validate the single <container> element against the context.
@Test public void testNodeSingleContainerXML() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
// hash's keys are the container id strings created for this app.
HashMap hash=addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
for ( String id : hash.keySet()) {
ClientResponse response=r.path("ws").path("v1").path("node").path("containers").path(id).accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse each response with the JDK DOM parser and compare against the
// container object held in the NM context.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("container");
assertEquals("incorrect number of elements",1,nodes.getLength());
verifyContainersInfoXML(nodes,nmContext.getContainers().get(ConverterUtils.toContainerId(id)));
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// List all containers as XML. Expects 4 <container> elements — presumably
// addAppContainers registers two containers per app; confirm in the helper.
@Test public void testNodeContainerXML() throws JSONException, Exception {
WebResource r=resource();
Application app=new MockApp(1);
nmContext.getApplications().put(app.getAppId(),app);
addAppContainers(app);
Application app2=new MockApp(2);
nmContext.getApplications().put(app2.getAppId(),app2);
addAppContainers(app2);
ClientResponse response=r.path("ws").path("v1").path("node").path("containers").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse the raw XML with the JDK DOM parser and count <container> elements.
DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance();
DocumentBuilder db=dbf.newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
NodeList nodes=dom.getElementsByTagName("container");
assertEquals("incorrect number of elements",4,nodes.getLength());
}
InternalCallVerifier EqualityVerifier
// With no containers registered, /ws/v1/node/containers must return a JSON
// null for "containers" rather than an empty array.
@Test public void testNodeContainersNone() throws JSONException, Exception {
  WebResource webResource=resource();
  ClientResponse clientResponse=webResource.path("ws").path("v1").path("node").path("containers").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
  JSONObject body=clientResponse.getEntity(JSONObject.class);
  assertEquals("apps isn't NULL",JSONObject.NULL,body.get("containers"));
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAuthorizedAccess() throws Exception {
MyContainerManager containerManager=new MyContainerManager();
rm=new MockRMWithAMS(conf,containerManager);
rm.start();
MockNM nm1=rm.registerNode("localhost:1234",5120);
Map acls=new HashMap(2);
acls.put(ApplicationAccessType.VIEW_APP,"*");
RMApp app=rm.submitApp(1024,"appname","appuser",acls);
nm1.nodeHeartbeat(true);
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
waitForLaunchedState(attempt);
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,rm.getApplicationMasterService().getBindAddress(),conf);
}
}
);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
RegisterApplicationMasterResponse response=client.registerApplicationMaster(request);
Assert.assertNotNull(response.getClientToAMTokenMasterKey());
if (UserGroupInformation.isSecurityEnabled()) {
Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
}
Assert.assertEquals("Register response has bad ACLs","*",response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
}
InternalCallVerifier EqualityVerifier
// finishApplication(null) must be a safe no-op: no app is moved to the
// completed list and the live-app map is untouched.
@Test public void testRMAppRetireNullApp() throws Exception {
  long currentTime=System.currentTimeMillis();
  RMContext context=mockRMContext(10,currentTime - 20000);
  TestRMAppManager manager=new TestRMAppManager(context,new Configuration());
  Assert.assertEquals("Number of apps incorrect before",10,context.getRMApps().size());
  manager.finishApplication(null);
  Assert.assertEquals("Number of completed apps incorrect after check",0,manager.getCompletedAppsListSize());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Submitting with an application id that already exists must be rejected
// with a duplicate error and must not disturb the app stored under that id.
@Test(timeout=30000) public void testRMAppSubmitDuplicateApplicationId() throws Exception {
  ApplicationId appId=MockApps.newAppID(0);
  asContext.setApplicationId(appId);
  RMApp appOrig=rmContext.getRMApps().get(appId);
  // Compare with equals(), not "!=": reference inequality can hold even for
  // equal strings, so the original identity check proved nothing.
  Assert.assertTrue("app name matches but shouldn't",!"testApp1".equals(appOrig.getName()));
  try {
    appMonitor.submitApplication(asContext,"test");
    Assert.fail("Exception is expected when applicationId is duplicate.");
  }
  catch ( YarnException e) {
    Assert.assertTrue("The thrown exception is not the expectd one.",e.getMessage().contains("Cannot add a duplicate!"));
  }
  // The pre-existing app must be untouched by the failed submission.
  RMApp app=rmContext.getRMApps().get(appId);
  Assert.assertNotNull("app is null",app);
  Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
  Assert.assertEquals("app state doesn't match",RMAppState.FINISHED,app.getState());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks how the effective max-app-attempts is derived from the global
 * RM_AM_MAX_ATTEMPTS cap and a per-application override: overrides within
 * (0, global] are honored; 0 (meaning "not set") and values above the
 * global cap fall back to the global setting (see expectedNums).
 */
@Test(timeout=30000) public void testRMAppSubmitMaxAppAttempts() throws Exception {
int[] globalMaxAppAttempts=new int[]{10,1};
// Per-app overrides paired with each global cap; 0 / -1 mean "unset".
int[][] individualMaxAppAttempts=new int[][]{new int[]{9,10,11,0},new int[]{1,10,0,-1}};
// Expected effective attempt counts for the corresponding pairs above.
int[][] expectedNums=new int[][]{new int[]{9,10,10,10},new int[]{1,1,1,1}};
for (int i=0; i < globalMaxAppAttempts.length; ++i) {
for (int j=0; j < individualMaxAppAttempts.length; ++j) {
// Fresh scheduler/manager per iteration so config changes don't leak.
ResourceScheduler scheduler=mockResourceScheduler();
Configuration conf=new Configuration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,globalMaxAppAttempts[i]);
ApplicationMasterService masterService=new ApplicationMasterService(rmContext,scheduler);
TestRMAppManager appMonitor=new TestRMAppManager(rmContext,new ClientToAMTokenSecretManagerInRM(),scheduler,masterService,new ApplicationACLsManager(conf),conf);
// Unique app id per iteration so submissions never collide.
ApplicationId appID=MockApps.newAppID(i * 4 + j + 1);
asContext.setApplicationId(appID);
if (individualMaxAppAttempts[i][j] != 0) {
asContext.setMaxAppAttempts(individualMaxAppAttempts[i][j]);
}
appMonitor.submitApplication(asContext,"test");
RMApp app=rmContext.getRMApps().get(appID);
Assert.assertEquals("max application attempts doesn't match",expectedNums[i][j],app.getMaxAppAttempts());
int timeoutSecs=0;
// Poll (up to ~20s) until this submission's event replaces the KILL
// marker left by the reset below / previous iteration.
while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
Thread.sleep(1000);
}
// Re-arm the KILL sentinel so the next iteration waits on its own event.
setAppEventType(RMAppEventType.KILL);
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * With both completed-app limits set to 3, retiring 10 completed apps
 * must leave exactly 3 in memory and remove the other 7 from the state
 * store.
 */
@Test public void testRMAppRetireSome() throws Exception {
  final int limit = 3;
  RMContext context = mockRMContext(10, System.currentTimeMillis() - 20000);
  Configuration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, limit);
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, limit);
  TestRMAppManager manager = new TestRMAppManager(context, conf);
  Assert.assertEquals("Number of apps incorrect before", 10, context.getRMApps().size());
  addToCompletedApps(manager, context);
  manager.checkAppNumCompletedLimit();
  Assert.assertEquals("Number of apps incorrect after # completed check", limit,
      context.getRMApps().size());
  Assert.assertEquals("Number of completed apps incorrect after check", limit,
      manager.getCompletedAppsListSize());
  // 10 completed minus 3 retained = 7 removals from the state store.
  verify(context.getStateStore(), times(10 - limit)).removeApplication(isA(RMApp.class));
}
InternalCallVerifier BooleanVerifier
/**
 * Application summary fields containing CR/LF characters must be escaped
 * so the one-line summary log entry is never split across lines.
 */
@Test(timeout=30000) public void testEscapeApplicationSummary(){
  RMApp app = mock(RMAppImpl.class);
  when(app.getApplicationId()).thenReturn(ApplicationId.newInstance(100L, 1));
  when(app.getName()).thenReturn("Multiline\n\n\r\rAppName");
  when(app.getUser()).thenReturn("Multiline\n\n\r\rUserName");
  when(app.getQueue()).thenReturn("Multiline\n\n\r\rQueueName");
  when(app.getState()).thenReturn(RMAppState.RUNNING);
  String msg = new RMAppManager.ApplicationSummary().createAppSummary(app).toString();
  LOG.info("summary: " + msg);
  // No raw line terminators may survive in the rendered summary.
  Assert.assertFalse(msg.contains("\n"));
  Assert.assertFalse(msg.contains("\r"));
  // Each multi-line field must appear with the terminators escaped instead.
  for (String field : new String[]{"AppName", "UserName", "QueueName"}) {
    Assert.assertTrue(msg.contains("Multiline" + "\\n\\n\\r\\r" + field));
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Retirement must only consider apps in a terminal state. Of the 10 apps
 * below, 6 are terminal (KILLED/FAILED/FINISHED) and 4 are active; with a
 * completed-app limit of 2, the RM keeps the 4 active apps plus 2
 * completed ones and removes 4 from the state store.
 */
@Test public void testRMAppRetireSomeDifferentStates() throws Exception {
  long now = System.currentTimeMillis();
  RMContext rmContext = mockRMContext(10, now - 20000);
  Configuration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 2);
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2);
  TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
  rmContext.getRMApps().clear();
  Assert.assertEquals("map isn't empty", 0, rmContext.getRMApps().size());
  // App id -> (age before 'now', final state); same ids/ages/states and
  // the same insertion order as the original hand-unrolled version.
  RMAppState[] states = {
      RMAppState.KILLED, RMAppState.FAILED, RMAppState.FINISHED,
      RMAppState.RUNNING, RMAppState.NEW, RMAppState.KILLED,
      RMAppState.ACCEPTED, RMAppState.SUBMITTED, RMAppState.FAILED,
      RMAppState.FAILED};
  long[] ages = {20000, 200000, 30000, 20000, 20000, 10001, 30000, 20000, 10001, 20000};
  for (int id = 0; id < states.length; id++) {
    RMApp app = new MockRMApp(id, now - ages[id], states[id]);
    rmContext.getRMApps().put(app.getApplicationId(), app);
  }
  Assert.assertEquals("Number of apps incorrect before", 10, rmContext.getRMApps().size());
  addToCompletedApps(appMonitor, rmContext);
  appMonitor.checkAppNumCompletedLimit();
  Assert.assertEquals("Number of apps incorrect after # completed check", 6,
      rmContext.getRMApps().size());
  Assert.assertEquals("Number of completed apps incorrect after check", 2,
      appMonitor.getCompletedAppsListSize());
  verify(rmContext.getStateStore(), times(4)).removeApplication(isA(RMApp.class));
}
InternalCallVerifier EqualityVerifier
/**
 * A completed-app limit of zero retires everything: all apps leave the
 * RM map and every one of them is removed from the state store.
 */
@Test public void testRMAppRetireZeroSetting() throws Exception {
  RMContext context = mockRMContext(10, System.currentTimeMillis() - 20000);
  Configuration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 0);
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 0);
  TestRMAppManager manager = new TestRMAppManager(context, conf);
  Assert.assertEquals("Number of apps incorrect before", 10, context.getRMApps().size());
  addToCompletedApps(manager, context);
  Assert.assertEquals("Number of completed apps incorrect", 10,
      manager.getCompletedAppsListSize());
  manager.checkAppNumCompletedLimit();
  Assert.assertEquals("Number of apps incorrect after # completed check", 0,
      context.getRMApps().size());
  Assert.assertEquals("Number of completed apps incorrect after check", 0,
      manager.getCompletedAppsListSize());
  verify(context.getStateStore(), times(10)).removeApplication(isA(RMApp.class));
}
InternalCallVerifier EqualityVerifier
/**
 * When the completed-app limit equals the number of completed apps,
 * nothing is retired and the state store is never touched.
 */
@Test public void testRMAppRetireNone() throws Exception {
  RMContext context = mockRMContext(10, System.currentTimeMillis() - 10);
  Configuration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 10);
  TestRMAppManager manager = new TestRMAppManager(context, conf);
  Assert.assertEquals("Number of apps incorrect before checkAppTimeLimit", 10,
      context.getRMApps().size());
  addToCompletedApps(manager, context);
  manager.checkAppNumCompletedLimit();
  Assert.assertEquals("Number of apps incorrect after # completed check", 10,
      context.getRMApps().size());
  Assert.assertEquals("Number of completed apps incorrect after check", 10,
      manager.getCompletedAppsListSize());
  verify(context.getStateStore(), never()).removeApplication(isA(RMApp.class));
}
InternalCallVerifier EqualityVerifier
/**
 * When the state-store completed-app limit exceeds the in-memory limit,
 * the in-memory limit governs: both the RM map and the state-store count
 * settle on it, and only the overflow is removed.
 */
@Test public void testStateStoreAppLimitLargerThanMemoryAppLimit(){
  final int maxInMemory = 8;
  RMContext context = mockRMContext(10, System.currentTimeMillis() - 20000);
  Configuration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, maxInMemory);
  conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 1000);
  TestRMAppManager manager = new TestRMAppManager(context, conf);
  addToCompletedApps(manager, context);
  Assert.assertEquals("Number of completed apps incorrect", 10,
      manager.getCompletedAppsListSize());
  manager.checkAppNumCompletedLimit();
  Assert.assertEquals("Number of apps incorrect after # completed check", maxInMemory,
      context.getRMApps().size());
  Assert.assertEquals("Number of completed apps incorrect after check", maxInMemory,
      manager.getCompletedAppsListSize());
  // 10 - 8 apps fall past the in-memory limit and get removed.
  verify(context.getStateStore(), times(10 - maxInMemory)).removeApplication(isA(RMApp.class));
  Assert.assertEquals(maxInMemory, manager.getCompletedAppsInStateStore());
}
InternalCallVerifier EqualityVerifier
/**
 * When the state-store completed-app limit is smaller than the in-memory
 * limit, the two are enforced independently: memory keeps 8 apps while
 * the state store keeps only 4.
 */
@Test public void testStateStoreAppLimitLessThanMemoryAppLimit(){
  final int maxInMemory = 8;
  final int maxInStateStore = 4;
  RMContext context = mockRMContext(10, System.currentTimeMillis() - 20000);
  Configuration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, maxInMemory);
  conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, maxInStateStore);
  TestRMAppManager manager = new TestRMAppManager(context, conf);
  addToCompletedApps(manager, context);
  Assert.assertEquals("Number of completed apps incorrect", 10,
      manager.getCompletedAppsListSize());
  manager.checkAppNumCompletedLimit();
  Assert.assertEquals("Number of apps incorrect after # completed check", maxInMemory,
      context.getRMApps().size());
  Assert.assertEquals("Number of completed apps incorrect after check", maxInMemory,
      manager.getCompletedAppsListSize());
  // The state store drops down to its own (tighter) limit.
  verify(context.getStateStore(), times(10 - maxInStateStore))
      .removeApplication(isA(RMApp.class));
  Assert.assertEquals(maxInStateStore, manager.getCompletedAppsInStateStore());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * A plain submission must create an app in NEW state under the requested
 * id and eventually fire a START event for it.
 */
@Test public void testRMAppSubmit() throws Exception {
appMonitor.submitApplication(asContext,"test");
RMApp app=rmContext.getRMApps().get(appId);
Assert.assertNotNull("app is null",app);
Assert.assertEquals("app id doesn't match",appId,app.getApplicationId());
Assert.assertEquals("app state doesn't match",RMAppState.NEW,app.getState());
int timeoutSecs=0;
// Poll (up to ~20s) until the submission's event arrives; KILL is
// presumably the sentinel value the event marker is reset to — see
// setAppEventType usage elsewhere in this class.
while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
Thread.sleep(1000);
}
Assert.assertEquals("app event type sent is wrong",RMAppEventType.START,getAppEventType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * After the AM releases a container, an NM heartbeat that still reports
 * that container as RUNNING must be answered with a cleanup order — both
 * right after the release and again much later.
 * NOTE(review): generic type parameters appear stripped in this file
 * (e.g. "Map>", "new ArrayList()"); left byte-identical here.
 */
@SuppressWarnings("resource") @Test public void testContainerCleanup() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
// DrainDispatcher lets the test block until queued events are processed.
final DrainDispatcher dispatcher=new DrainDispatcher();
MockRM rm=new MockRM(){
// Dispatch scheduler events synchronously so allocations are visible
// immediately after handle() returns.
@Override protected EventHandler createSchedulerEventDispatcher(){
return new SchedulerEventDispatcher(this.scheduler){
@Override public void handle( SchedulerEvent event){
scheduler.handle(event);
}
}
;
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
}
;
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",5000);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
// Ask for two containers, then poll allocate()/heartbeat until both arrive.
int request=2;
am.allocate("127.0.0.1",1000,request,new ArrayList());
dispatcher.await();
nm1.nodeHeartbeat(true);
List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
int contReceived=conts.size();
int waitCount=0;
while (contReceived < request && waitCount++ < 200) {
LOG.info("Got " + contReceived + " containers. Waiting to get "+ request);
Thread.sleep(100);
conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
dispatcher.await();
contReceived+=conts.size();
nm1.nodeHeartbeat(true);
}
Assert.assertEquals(request,contReceived);
// Release the first container back to the RM.
ArrayList release=new ArrayList();
release.add(conts.get(0).getId());
am.allocate(new ArrayList(),release);
dispatcher.await();
// Heartbeat still claiming the released container RUNNING -> RM must
// order its cleanup.
Map> containerStatuses=new HashMap>();
ArrayList containerStatusList=new ArrayList();
containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0).getId(),ContainerState.RUNNING,"nothing",0));
containerStatuses.put(app.getApplicationId(),containerStatusList);
NodeHeartbeatResponse resp=nm1.nodeHeartbeat(containerStatuses,true);
waitForContainerCleanup(dispatcher,nm1,resp);
// Repeat the stale report later; the RM must order cleanup again.
LOG.info("Testing container launch much after release and " + "NM getting cleanup");
containerStatuses.clear();
containerStatusList.clear();
containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0).getId(),ContainerState.RUNNING,"nothing",0));
containerStatuses.put(app.getApplicationId(),containerStatusList);
resp=nm1.nodeHeartbeat(containerStatuses,true);
waitForContainerCleanup(dispatcher,nm1,resp);
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * After an application finishes, NM heartbeats must accumulate cleanup
 * orders for the app itself and for both of its containers.
 */
@SuppressWarnings("resource") @Test public void testAppCleanup() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
MockRM rm=new MockRM();
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",5000);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
// Request two containers and poll until both have been allocated.
int request=2;
am.allocate("127.0.0.1",1000,request,new ArrayList());
nm1.nodeHeartbeat(true);
List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
int contReceived=conts.size();
int waitCount=0;
while (contReceived < request && waitCount++ < 200) {
LOG.info("Got " + contReceived + " containers. Waiting to get "+ request);
Thread.sleep(100);
conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
contReceived+=conts.size();
nm1.nodeHeartbeat(true);
}
Assert.assertEquals(request,contReceived);
// Finish the app, then collect cleanup orders over repeated heartbeats.
am.unregisterAppAttempt();
NodeHeartbeatResponse resp=nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FINISHED);
resp=nm1.nodeHeartbeat(true);
List containersToCleanup=resp.getContainersToCleanup();
List appsToCleanup=resp.getApplicationsToCleanup();
int numCleanedContainers=containersToCleanup.size();
int numCleanedApps=appsToCleanup.size();
waitCount=0;
// Cleanup orders may arrive across several heartbeats; accumulate the
// deltas until 2 containers and 1 app are covered (or ~20s elapse).
while ((numCleanedContainers < 2 || numCleanedApps < 1) && waitCount++ < 200) {
LOG.info("Waiting to get cleanup events.. cleanedConts: " + numCleanedContainers + " cleanedApps: "+ numCleanedApps);
Thread.sleep(100);
resp=nm1.nodeHeartbeat(true);
List deltaContainersToCleanup=resp.getContainersToCleanup();
List deltaAppsToCleanup=resp.getApplicationsToCleanup();
containersToCleanup.addAll(deltaContainersToCleanup);
appsToCleanup.addAll(deltaAppsToCleanup);
numCleanedContainers=containersToCleanup.size();
numCleanedApps=appsToCleanup.size();
}
Assert.assertEquals(1,appsToCleanup.size());
Assert.assertEquals(app.getApplicationId(),appsToCleanup.get(0));
Assert.assertEquals(1,numCleanedApps);
Assert.assertEquals(2,numCleanedContainers);
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The custom AM launcher's ContainerManager must see the AM launch with
 * the correct attempt/container/host/submit-time metadata, and must be
 * asked to clean up once the attempt finishes.
 */
@Test public void testAMLaunchAndCleanup() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
// MyContainerManagerImpl records what the AM launcher sends it.
MyContainerManagerImpl containerManager=new MyContainerManagerImpl();
MockRMWithCustomAMLauncher rm=new MockRMWithCustomAMLauncher(containerManager);
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",5120);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
// Poll (up to ~20s) for the asynchronous AM launch.
int waitCount=0;
while (containerManager.launched == false && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertTrue(containerManager.launched);
// The launch request must carry the attempt's full metadata.
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId appAttemptId=attempt.getAppAttemptId();
Assert.assertEquals(appAttemptId.toString(),containerManager.attemptIdAtContainerManager);
Assert.assertEquals(app.getSubmitTime(),containerManager.submitTimeAtContainerManager);
Assert.assertEquals(app.getRMAppAttempt(appAttemptId).getMasterContainer().getId().toString(),containerManager.containerIdAtContainerManager);
Assert.assertEquals(nm1.getNodeId().toString(),containerManager.nmHostAtContainerManager);
Assert.assertEquals(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS,containerManager.maxAppAttempts);
// Run the attempt to completion, then poll for the cleanup call.
MockAM am=new MockAM(rm.getRMContext(),rm.getApplicationMasterService(),appAttemptId);
am.registerAppAttempt();
am.unregisterAppAttempt();
nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FINISHED);
waitCount=0;
while (containerManager.cleanedup == false && waitCount++ < 20) {
LOG.info("Waiting for AM Cleanup to happen..");
Thread.sleep(1000);
}
Assert.assertTrue(containerManager.cleanedup);
am.waitForState(RMAppAttemptState.FINISHED);
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An allocate() issued before registerAppAttempt() must be answered with
 * AM_RESYNC; a second registration must be rejected; and an allocate()
 * after the attempt finished must be answered with AM_SHUTDOWN.
 */
@SuppressWarnings("unused") @Test(timeout=100000) public void testallocateBeforeAMRegistration() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  boolean thrown = false;
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM();
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5000);
  RMApp app = rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  int request = 2;
  // Allocation before registration: the RM tells the AM to resync.
  AllocateResponse ar = am.allocate("h1", 1000, request, new ArrayList());
  Assert.assertTrue(ar.getAMCommand() == AMCommand.AM_RESYNC);
  nm1.nodeHeartbeat(true);
  AllocateResponse amrs = am.allocate(new ArrayList(), new ArrayList());
  // Bug fix: assert on the fresh response 'amrs' — the original re-checked
  // 'ar', so the second allocate was never actually verified.
  Assert.assertTrue(amrs.getAMCommand() == AMCommand.AM_RESYNC);
  am.registerAppAttempt();
  thrown = false;
  try {
    // Registering a second time must fail with a descriptive message.
    am.registerAppAttempt(false);
  }
  catch (Exception e) {
    Assert.assertEquals("Application Master is already registered : " + attempt.getAppAttemptId().getApplicationId(), e.getMessage());
    thrown = true;
  }
  Assert.assertTrue(thrown);
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);
  // Allocation after the attempt finished: the RM orders a shutdown.
  AllocateResponse amrs2 = am.allocate(new ArrayList(), new ArrayList());
  Assert.assertTrue(amrs2.getAMCommand() == AMCommand.AM_SHUTDOWN);
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * An AM attempting to release a container that belongs to a different
 * application attempt must get an InvalidContainerReleaseException naming
 * both the container and the offending attempt.
 */
@Test(timeout=600000) public void testInvalidContainerReleaseRequest() throws Exception {
MockRM rm=new MockRM(conf);
try {
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",6 * GB);
// App 1 obtains a container.
RMApp app1=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.addRequests(new String[]{"127.0.0.1"},GB,1,1);
AllocateResponse alloc1Response=am1.schedule();
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
sleep(1000);
alloc1Response=am1.schedule();
}
Assert.assertTrue(alloc1Response.getAllocatedContainers().size() > 0);
// App 2's AM then tries to release app 1's container.
RMApp app2=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt2=app2.getCurrentAppAttempt();
MockAM am2=rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
ContainerId cId=alloc1Response.getAllocatedContainers().get(0).getId();
am2.addContainerToBeReleased(cId);
try {
am2.schedule();
Assert.fail("Exception was expected!!");
}
catch ( InvalidContainerReleaseException e) {
// The message must identify the foreign container and attempt 2.
StringBuilder sb=new StringBuilder("Cannot release container : ");
sb.append(cId.toString());
sb.append(" not belonging to this application attempt : ");
sb.append(attempt2.getAppAttemptId().toString());
Assert.assertTrue(e.getMessage().contains(sb.toString()));
}
}
finally {
if (rm != null) {
rm.stop();
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Every allocated container's token must embed the RM's cluster
 * timestamp as its RM identifier, so NMs can reject tokens issued by a
 * previous RM incarnation.
 * NOTE(review): timeout=3000000 (50 min) looks excessive for this test —
 * confirm before tightening.
 */
@Test(timeout=3000000) public void testRMIdentifierOnContainerAllocation() throws Exception {
MockRM rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",6 * GB);
RMApp app1=rm.submitApp(2048);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.addRequests(new String[]{"127.0.0.1"},GB,1,1);
AllocateResponse alloc1Response=am1.schedule();
nm1.nodeHeartbeat(true);
// Poll until the scheduler hands out at least one container.
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
sleep(1000);
alloc1Response=am1.schedule();
}
Container allocatedContainer=alloc1Response.getAllocatedContainers().get(0);
// Decode the container token and compare its RM id with the cluster
// timestamp of this MockRM instance.
ContainerTokenIdentifier tokenId=BuilderUtils.newContainerTokenIdentifier(allocatedContainer.getContainerToken());
Assert.assertEquals(MockRM.getClusterTimeStamp(),tokenId.getRMIdentifer());
rm.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * getQueueInfo must return the mocked queue's applications for a known
 * queue, and must not throw for an unknown queue name.
 */
@Test public void testGetQueueInfo() throws Exception {
  YarnScheduler scheduler = mock(YarnScheduler.class);
  RMContext context = mock(RMContext.class);
  mockRMContext(scheduler, context);
  ClientRMService service = new ClientRMService(context, scheduler, null, null, null, null);
  GetQueueInfoRequest request = recordFactory.newRecordInstance(GetQueueInfoRequest.class);
  // Existing queue: the two mocked applications are reported.
  request.setQueueName("testqueue");
  request.setIncludeApplications(true);
  GetQueueInfoResponse queueInfo = service.getQueueInfo(request);
  Assert.assertEquals(2, queueInfo.getQueueInfo().getApplications().size());
  // Unknown queue: the call must still complete without throwing.
  request.setQueueName("nonexistentqueue");
  request.setIncludeApplications(true);
  queueInfo = service.getQueueInfo(request);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Requesting a report for an application id unknown to the RM must raise
 * ApplicationNotFoundException with a descriptive message.
 */
@Test public void testGetApplicationReport() throws YarnException {
  RMContext rmContext = mock(RMContext.class);
  // Empty app map: every lookup misses.
  when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap());
  ClientRMService rmService = new ClientRMService(rmContext, null, null, null, null, null);
  RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
  GetApplicationReportRequest request = recordFactory.newRecordInstance(GetApplicationReportRequest.class);
  request.setApplicationId(ApplicationId.newInstance(0, 0));
  try {
    rmService.getApplicationReport(request);
    Assert.fail();
  }
  catch (ApplicationNotFoundException ex) {
    // Bug fix: JUnit's assertEquals takes (expected, actual) — the
    // original had them reversed, which garbles the failure message.
    Assert.assertEquals("Application with id '" + request.getApplicationId() + "' doesn't exist in RM.", ex.getMessage());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * An attempt that has not run yet must report the shared DUMMY
 * resource-usage report rather than fabricated usage data.
 */
@Test public void testGetApplicationResourceUsageReportDummy() throws YarnException, IOException {
  ApplicationAttemptId attemptId = getApplicationAttemptId(1);
  YarnScheduler yarnScheduler = mockYarnScheduler();
  RMContext rmContext = mock(RMContext.class);
  mockRMContext(yarnScheduler, rmContext);
  // Swallow events fired by the attempt; this test only inspects state.
  when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){
    public void handle( Event event){
    }
  }
  );
  ApplicationSubmissionContext asContext = mock(ApplicationSubmissionContext.class);
  YarnConfiguration config = new YarnConfiguration();
  RMAppAttemptImpl rmAppAttemptImpl = new RMAppAttemptImpl(attemptId, rmContext, yarnScheduler, null, asContext, config, false);
  ApplicationResourceUsageReport report = rmAppAttemptImpl.getApplicationResourceUsageReport();
  // Bug fix: assertEquals takes (expected, actual) — the constant is the
  // expected value; the original had the arguments reversed.
  assertEquals(RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT, report);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A container report requested for a known container id must echo that
 * id back; an ApplicationNotFoundException fails the test.
 */
@Test public void testGetContainerReport() throws YarnException, IOException {
  ClientRMService service = createRMService();
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  ContainerId containerId = ContainerId.newInstance(attemptId, 1);
  GetContainerReportRequest request = RecordFactoryProvider.getRecordFactory(null)
      .newRecordInstance(GetContainerReportRequest.class);
  request.setContainerId(containerId);
  try {
    GetContainerReportResponse response = service.getContainerReport(request);
    Assert.assertEquals(containerId, response.getContainerReport().getContainerId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Killing a managed-AM app requires multiple forceKillApplication calls
 * before getIsKillCompleted turns true, while killing an unmanaged-AM
 * app acknowledges completion immediately; afterwards both apps show up
 * as KILLED.
 */
@Test public void testForceKillApplication() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
MockRM rm=new MockRM();
rm.init(conf);
rm.start();
ClientRMService rmService=rm.getClientRMService();
// Query that counts only apps already in KILLED state.
GetApplicationsRequest getRequest=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.KILLED));
RMApp app1=rm.submitApp(1024);
// app2 uses an unmanaged AM (second argument true).
RMApp app2=rm.submitApp(1024,true);
assertEquals("Incorrect number of apps in the RM",0,rmService.getApplications(getRequest).getApplicationList().size());
KillApplicationRequest killRequest1=KillApplicationRequest.newInstance(app1.getApplicationId());
KillApplicationRequest killRequest2=KillApplicationRequest.newInstance(app2.getApplicationId());
// Retry the kill until the RM reports it complete (managed AM: async).
int killAttemptCount=0;
for (int i=0; i < 100; i++) {
KillApplicationResponse killResponse1=rmService.forceKillApplication(killRequest1);
killAttemptCount++;
if (killResponse1.getIsKillCompleted()) {
break;
}
Thread.sleep(10);
}
assertTrue("Kill attempt count should be greater than 1 for managed AMs",killAttemptCount > 1);
assertEquals("Incorrect number of apps in the RM",1,rmService.getApplications(getRequest).getApplicationList().size());
// Unmanaged AM: the very first kill response claims completion.
KillApplicationResponse killResponse2=rmService.forceKillApplication(killRequest2);
assertTrue("Killing UnmanagedAM should falsely acknowledge true",killResponse2.getIsKillCompleted());
// Poll until both apps appear in the KILLED listing.
for (int i=0; i < 100; i++) {
if (2 == rmService.getApplications(getRequest).getApplicationList().size()) {
break;
}
Thread.sleep(10);
}
assertEquals("Incorrect number of apps in the RM",2,rmService.getApplications(getRequest).getApplicationList().size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * End-to-end submitApplication checks through ClientRMService: defaults
 * are applied when name/queue are absent, explicit name/queue are
 * honored, a duplicate submission of the same request does not throw,
 * and getApplications filters correctly by application type.
 */
@Test(timeout=30000) @SuppressWarnings("rawtypes") public void testAppSubmit() throws Exception {
YarnScheduler yarnScheduler=mockYarnScheduler();
RMContext rmContext=mock(RMContext.class);
mockRMContext(yarnScheduler,rmContext);
RMStateStore stateStore=mock(RMStateStore.class);
when(rmContext.getStateStore()).thenReturn(stateStore);
RMAppManager appManager=new RMAppManager(rmContext,yarnScheduler,null,mock(ApplicationACLsManager.class),new Configuration());
// Swallow dispatched events; only the resulting RMApp state is checked.
when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){
public void handle( Event event){
}
}
);
ApplicationId appId1=getApplicationId(100);
// Grant VIEW access and queue access so the submissions aren't rejected
// on authorization grounds.
ApplicationACLsManager mockAclsManager=mock(ApplicationACLsManager.class);
when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),ApplicationAccessType.VIEW_APP,null,appId1)).thenReturn(true);
QueueACLsManager mockQueueACLsManager=mock(QueueACLsManager.class);
when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),any(QueueACL.class),anyString())).thenReturn(true);
ClientRMService rmService=new ClientRMService(rmContext,yarnScheduler,appManager,mockAclsManager,mockQueueACLsManager,null);
// Submission 1: no name/queue -> defaults must be applied.
SubmitApplicationRequest submitRequest1=mockSubmitAppRequest(appId1,null,null);
try {
rmService.submitApplication(submitRequest1);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
RMApp app1=rmContext.getRMApps().get(appId1);
Assert.assertNotNull("app doesn't exist",app1);
Assert.assertEquals("app name doesn't match",YarnConfiguration.DEFAULT_APPLICATION_NAME,app1.getName());
Assert.assertEquals("app queue doesn't match",YarnConfiguration.DEFAULT_QUEUE_NAME,app1.getQueue());
// Submission 2: explicit name/queue and a distinctive application type.
String name=MockApps.newAppName();
String queue=MockApps.newQueue();
ApplicationId appId2=getApplicationId(101);
SubmitApplicationRequest submitRequest2=mockSubmitAppRequest(appId2,name,queue);
submitRequest2.getApplicationSubmissionContext().setApplicationType("matchType");
try {
rmService.submitApplication(submitRequest2);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
RMApp app2=rmContext.getRMApps().get(appId2);
Assert.assertNotNull("app doesn't exist",app2);
Assert.assertEquals("app name doesn't match",name,app2.getName());
Assert.assertEquals("app queue doesn't match",queue,app2.getQueue());
// Re-submitting the same request must be tolerated silently.
try {
rmService.submitApplication(submitRequest2);
}
catch ( YarnException e) {
Assert.fail("Exception is not expected.");
}
// Unfiltered listing: 2 submitted here + apps pre-seeded by
// mockRMContext (5 total).
GetApplicationsRequest getAllAppsRequest=GetApplicationsRequest.newInstance(new HashSet());
GetApplicationsResponse getAllApplicationsResponse=rmService.getApplications(getAllAppsRequest);
Assert.assertEquals(5,getAllApplicationsResponse.getApplicationList().size());
// Filtering by application type must return only app2.
Set appTypes=new HashSet();
appTypes.add("matchType");
getAllAppsRequest=GetApplicationsRequest.newInstance(appTypes);
getAllApplicationsResponse=rmService.getApplications(getAllAppsRequest);
Assert.assertEquals(1,getAllApplicationsResponse.getApplicationList().size());
Assert.assertEquals(appId2,getAllApplicationsResponse.getApplicationList().get(0).getApplicationId());
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Renewing a delegation token as a user other than the designated
 * renewer must fail with a YarnException naming both users.
 */
@Test public void testTokenRenewalWrongUser() throws Exception {
  try {
    owner.doAs(new PrivilegedExceptionAction(){
      @Override public Void run() throws Exception {
        try {
          checkTokenRenewal(owner, other);
        }
        catch (YarnException ex) {
          // The rejection message must identify the caller and the renewer.
          Assert.assertTrue(ex.getMessage().contains(owner.getUserName() + " tries to renew a token with renewer " + other.getUserName()));
          throw ex;
        }
        return null;
      }
    });
  }
  catch (Exception expected) {
    // Renewal by the wrong user was correctly rejected.
    return;
  }
  Assert.fail("renew should have failed");
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier
/**
 * getClusterNodes must honor the requested NodeState filter: RUNNING
 * hides lost and unhealthy nodes, UNHEALTHY returns only unhealthy ones,
 * and allOf(NodeState) returns every registered node.
 */
@Test public void testGetClusterNodes() throws Exception {
// MockRM wired so the ClientRMService uses this RM's real secret manager.
MockRM rm=new MockRM(){
protected ClientRMService createClientRMService(){
return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,this.getRMContext().getRMDelegationTokenSecretManager());
}
}
;
rm.start();
// One healthy node and one node driven to LOST.
MockNM node=rm.registerNode("host1:1234",1024);
rm.sendNodeStarted(node);
node.nodeHeartbeat(true);
MockNM lostNode=rm.registerNode("host2:1235",1024);
rm.sendNodeStarted(lostNode);
lostNode.nodeHeartbeat(true);
rm.NMwaitForState(lostNode.getNodeId(),NodeState.RUNNING);
rm.sendNodeLost(lostNode);
// Talk to the RM over the real RPC client interface.
Configuration conf=new Configuration();
YarnRPC rpc=YarnRPC.create(conf);
InetSocketAddress rmAddress=rm.getClientRMService().getBindAddress();
LOG.info("Connecting to ResourceManager at " + rmAddress);
ApplicationClientProtocol client=(ApplicationClientProtocol)rpc.getProxy(ApplicationClientProtocol.class,rmAddress,conf);
// RUNNING filter: only the healthy node shows up.
GetClusterNodesRequest request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING));
List nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals(1,nodeReports.size());
Assert.assertNotSame("Node is expected to be healthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState());
// Mark the node unhealthy: it must drop out of the RUNNING listing...
node.nodeHeartbeat(false);
nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals("Unhealthy nodes should not show up by default",0,nodeReports.size());
// ...and appear under the UNHEALTHY filter instead.
request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.UNHEALTHY));
nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals(1,nodeReports.size());
Assert.assertEquals("Node is expected to be unhealthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState());
// allOf(NodeState): every node ever registered (healthy, lost, new).
rm.registerNode("host3:1236",1024);
request=GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class));
nodeReports=client.getClusterNodes(request).getNodeReports();
Assert.assertEquals(3,nodeReports.size());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * A report requested for a known application attempt id must echo that
 * id back; an ApplicationNotFoundException fails the test.
 */
@Test public void testGetApplicationAttemptReport() throws YarnException, IOException {
  ClientRMService service = createRMService();
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  GetApplicationAttemptReportRequest request = RecordFactoryProvider.getRecordFactory(null)
      .newRecordInstance(GetApplicationAttemptReportRequest.class);
  request.setApplicationAttemptId(attemptId);
  try {
    GetApplicationAttemptReportResponse response = service.getApplicationAttemptReport(request);
    Assert.assertEquals(attemptId, response.getApplicationAttemptReport().getApplicationAttemptId());
  } catch (ApplicationNotFoundException ex) {
    Assert.fail(ex.getMessage());
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that {@code getApplicationAttempts} for a known application
 * returns exactly one attempt carrying the expected attempt id.
 */
@Test public void testGetApplicationAttempts() throws YarnException, IOException {
  ClientRMService service = createRMService();
  RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  GetApplicationAttemptsRequest attemptsRequest =
      factory.newRecordInstance(GetApplicationAttemptsRequest.class);
  ApplicationId appId = ApplicationId.newInstance(123456, 1);
  ApplicationAttemptId expectedAttemptId = ApplicationAttemptId.newInstance(appId, 1);
  attemptsRequest.setApplicationId(appId);
  try {
    GetApplicationAttemptsResponse attemptsResponse =
        service.getApplicationAttempts(attemptsRequest);
    Assert.assertEquals(1, attemptsResponse.getApplicationAttemptList().size());
    Assert.assertEquals(expectedAttemptId,
        attemptsResponse.getApplicationAttemptList().get(0).getApplicationAttemptId());
  } catch (ApplicationNotFoundException ex) {
    // The application is expected to exist in the mocked RM service.
    Assert.fail(ex.getMessage());
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Verifies that a third party who is neither the owner nor the renewer of a
 * delegation token cannot cancel it: for every owner/renewer pair below the
 * cancellation is attempted by {@code testerKerb} (first loop) or
 * {@code tester} (second loop) and must be rejected with a
 * "not authorized" YarnException.
 */
@Test public void testTokenCancellationByWrongUser(){
// ClientRMService backed only by the delegation token secret manager (dtsm).
RMContext rmContext=mock(RMContext.class);
final ClientRMService rmService=new ClientRMService(rmContext,null,null,null,null,dtsm);
// First pass: testerKerb performs the cancellation for each owner/renewer pair.
UserGroupInformation[] kerbTestOwners={owner,other,tester,ownerKerb,otherKerb};
UserGroupInformation[] kerbTestRenewers={owner,other,ownerKerb,otherKerb};
for ( final UserGroupInformation tokOwner : kerbTestOwners) {
for ( final UserGroupInformation tokRenewer : kerbTestRenewers) {
try {
testerKerb.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
checkTokenCancellation(rmService,tokOwner,tokRenewer);
// Reaching here means an unauthorized cancellation succeeded.
Assert.fail("We should not reach here; token owner = " + tokOwner.getUserName() + ", renewer = "+ tokRenewer.getUserName());
return null;
}
catch ( YarnException e) {
// Expected: the canceller is rejected by name.
Assert.assertTrue(e.getMessage().contains(testerKerb.getUserName() + " is not authorized to cancel the token"));
return null;
}
}
}
);
}
catch ( Exception e) {
Assert.fail("Unexpected exception; " + e.getMessage());
}
}
}
// Second pass: tester performs the cancellation for each owner/renewer pair.
UserGroupInformation[] simpleTestOwners={owner,other,ownerKerb,otherKerb,testerKerb};
UserGroupInformation[] simpleTestRenewers={owner,other,ownerKerb,otherKerb};
for ( final UserGroupInformation tokOwner : simpleTestOwners) {
for ( final UserGroupInformation tokRenewer : simpleTestRenewers) {
try {
tester.doAs(new PrivilegedExceptionAction(){
@Override public Void run() throws Exception {
try {
checkTokenCancellation(tokOwner,tokRenewer);
Assert.fail("We should not reach here; token owner = " + tokOwner.getUserName() + ", renewer = "+ tokRenewer.getUserName());
return null;
}
catch ( YarnException ex) {
// Expected rejection of the third-party canceller.
Assert.assertTrue(ex.getMessage().contains(tester.getUserName() + " is not authorized to cancel the token"));
return null;
}
}
}
);
}
catch ( Exception e) {
Assert.fail("Unexpected exception; " + e.getMessage());
}
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises ClientRMService#getApplications filtering: limit, start-time
 * range, queue, user, application tags, and request scope
 * (ALL/VIEWABLE/OWN). Three apps are submitted here; totals of 6 include
 * apps already present in the mocked RM context (NOTE(review): presumably
 * seeded by mockRMContext — confirm).
 */
@Test public void testGetApplications() throws IOException, YarnException {
// Build a ClientRMService over a mocked scheduler/context with permissive ACLs.
YarnScheduler yarnScheduler=mockYarnScheduler();
RMContext rmContext=mock(RMContext.class);
mockRMContext(yarnScheduler,rmContext);
RMStateStore stateStore=mock(RMStateStore.class);
when(rmContext.getStateStore()).thenReturn(stateStore);
RMAppManager appManager=new RMAppManager(rmContext,yarnScheduler,null,mock(ApplicationACLsManager.class),new Configuration());
// Swallow all dispatched events; this test only inspects the service's answers.
when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){
public void handle( Event event){
}
}
);
ApplicationACLsManager mockAclsManager=mock(ApplicationACLsManager.class);
QueueACLsManager mockQueueACLsManager=mock(QueueACLsManager.class);
when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),any(QueueACL.class),anyString())).thenReturn(true);
ClientRMService rmService=new ClientRMService(rmContext,yarnScheduler,appManager,mockAclsManager,mockQueueACLsManager,null);
// Submit three apps: queues alternate QUEUE_1/QUEUE_2, app i carries tags
// Tag1..Tag(i+1); record each submission time for the range filter below.
String[] queues={QUEUE_1,QUEUE_2};
String[] appNames={MockApps.newAppName(),MockApps.newAppName(),MockApps.newAppName()};
ApplicationId[] appIds={getApplicationId(101),getApplicationId(102),getApplicationId(103)};
List tags=Arrays.asList("Tag1","Tag2","Tag3");
long[] submitTimeMillis=new long[3];
for (int i=0; i < appIds.length; i++) {
ApplicationId appId=appIds[i];
when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),ApplicationAccessType.VIEW_APP,null,appId)).thenReturn(true);
SubmitApplicationRequest submitRequest=mockSubmitAppRequest(appId,appNames[i],queues[i % queues.length],new HashSet(tags.subList(0,i + 1)));
rmService.submitApplication(submitRequest);
submitTimeMillis[i]=System.currentTimeMillis();
}
// No filter: all apps are visible.
GetApplicationsRequest request=GetApplicationsRequest.newInstance();
assertEquals("Incorrect total number of apps",6,rmService.getApplications(request).getApplicationList().size());
// Limit caps the result size.
request.setLimit(1L);
assertEquals("Failed to limit applications",1,rmService.getApplications(request).getApplicationList().size());
// Start-time range: each timestamp was taken after its app's submission, so
// the matching counts shrink (2, 1, 0) as the lower bound advances.
request=GetApplicationsRequest.newInstance();
request.setStartRange(submitTimeMillis[0],System.currentTimeMillis());
assertEquals("Incorrect number of matching start range",2,rmService.getApplications(request).getApplicationList().size());
request.setStartRange(submitTimeMillis[1],System.currentTimeMillis());
assertEquals("Incorrect number of matching start range",1,rmService.getApplications(request).getApplicationList().size());
request.setStartRange(submitTimeMillis[2],System.currentTimeMillis());
assertEquals("Incorrect number of matching start range",0,rmService.getApplications(request).getApplicationList().size());
// Queue filter; the asserts show the set is read live after setQueues().
request=GetApplicationsRequest.newInstance();
Set queueSet=new HashSet();
request.setQueues(queueSet);
queueSet.add(queues[0]);
assertEquals("Incorrect number of applications in queue",2,rmService.getApplications(request).getApplicationList().size());
assertEquals("Incorrect number of applications in queue",2,rmService.getApplications(request,false).getApplicationList().size());
queueSet.add(queues[1]);
assertEquals("Incorrect number of applications in queue",3,rmService.getApplications(request).getApplicationList().size());
// User filter: unknown user matches nothing, current user matches all three.
request=GetApplicationsRequest.newInstance();
Set userSet=new HashSet();
request.setUsers(userSet);
userSet.add("random-user-name");
assertEquals("Incorrect number of applications for user",0,rmService.getApplications(request).getApplicationList().size());
userSet.add(UserGroupInformation.getCurrentUser().getShortUserName());
assertEquals("Incorrect number of applications for user",3,rmService.getApplications(request).getApplicationList().size());
// Tag filter: Tag1 is on all 3 submitted apps, Tag2 on 2, Tag3 on 1.
request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.ALL,null,null,null,null,null,null,null,null);
Set tagSet=new HashSet();
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",6,rmService.getApplications(request).getApplicationList().size());
tagSet=Sets.newHashSet(tags.get(0));
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",3,rmService.getApplications(request).getApplicationList().size());
tagSet=Sets.newHashSet(tags.get(1));
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",2,rmService.getApplications(request).getApplicationList().size());
tagSet=Sets.newHashSet(tags.get(2));
request.setApplicationTags(tagSet);
assertEquals("Incorrect number of matching tags",1,rmService.getApplications(request).getApplicationList().size());
// Scope filter: VIEWABLE sees all 6, OWN only the current user's 3.
request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.VIEWABLE);
assertEquals("Incorrect number of applications for the scope",6,rmService.getApplications(request).getApplicationList().size());
request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.OWN);
assertEquals("Incorrect number of applications for the scope",3,rmService.getApplications(request).getApplicationList().size());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that {@code getContainers} returns the container belonging to a
 * known application attempt with the expected container id.
 */
@Test public void testGetContainers() throws YarnException, IOException {
  ClientRMService service = createRMService();
  RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
  GetContainersRequest containersRequest =
      factory.newRecordInstance(GetContainersRequest.class);
  ApplicationId appId = ApplicationId.newInstance(123456, 1);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId expectedContainerId = ContainerId.newInstance(attemptId, 1);
  containersRequest.setApplicationAttemptId(attemptId);
  try {
    GetContainersResponse containersResponse = service.getContainers(containersRequest);
    Assert.assertEquals(expectedContainerId,
        containersResponse.getContainerList().get(0).getContainerId());
  } catch (ApplicationNotFoundException ex) {
    // The attempt is expected to exist in the mocked RM service.
    Assert.fail(ex.getMessage());
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * FifoScheduler must honor blacklist updates passed through allocate():
 * requests are never satisfied by heartbeats from blacklisted hosts or
 * racks, only by the remaining eligible node (n4).
 */
@Test(timeout=50000) public void testBlackListNodes() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler();
// Two racks with two 4 GB nodes each: n1/n2 on rack 0, n3/n4 on rack 1.
int rack_num_0=0;
int rack_num_1=1;
String host_0_0="127.0.0.1";
RMNode n1=MockNodes.newNodeInfo(rack_num_0,MockNodes.newResource(4 * GB),1,host_0_0);
fs.handle(new NodeAddedSchedulerEvent(n1));
String host_0_1="127.0.0.2";
RMNode n2=MockNodes.newNodeInfo(rack_num_0,MockNodes.newResource(4 * GB),1,host_0_1);
fs.handle(new NodeAddedSchedulerEvent(n2));
String host_1_0="127.0.0.3";
RMNode n3=MockNodes.newNodeInfo(rack_num_1,MockNodes.newResource(4 * GB),1,host_1_0);
fs.handle(new NodeAddedSchedulerEvent(n3));
String host_1_1="127.0.0.4";
RMNode n4=MockNodes.newNodeInfo(rack_num_1,MockNodes.newResource(4 * GB),1,host_1_1);
fs.handle(new NodeAddedSchedulerEvent(n4));
// Register one application and one attempt with the scheduler.
ApplicationId appId1=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(appId1,1);
SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId1,"queue","user");
fs.handle(appEvent);
SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId1,false);
fs.handle(attemptEvent);
List emptyId=new ArrayList();
List emptyAsk=new ArrayList();
// Ask for one 1 GB container (rack1 or ANY) while passing host_1_0 in the
// 4th allocate argument (NOTE(review): assumed to be blacklist additions —
// confirm against the FifoScheduler.allocate signature).
List ask1=new ArrayList();
ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),"rack1",BuilderUtils.newResource(GB,1),1));
ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1));
fs.allocate(appAttemptId1,ask1,emptyId,Collections.singletonList(host_1_0),null);
// A heartbeat from the blacklisted n3 (host_1_0) must not yield a container...
fs.handle(new NodeUpdateSchedulerEvent(n3));
Allocation allocation1=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation1",0,allocation1.getContainers().size());
// ...but a heartbeat from the eligible n4 does.
fs.handle(new NodeUpdateSchedulerEvent(n4));
Allocation allocation2=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation2",1,allocation2.getContainers().size());
List containerList=allocation2.getContainers();
for ( Container container : containerList) {
Assert.assertEquals("Container is allocated on n4",container.getNodeId(),n4.getNodeID());
}
// Second request: blacklist the whole "rack0". Heartbeats from n1, n2 and
// n3 all produce nothing; only n4 satisfies the request again.
List ask2=new ArrayList();
ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1));
fs.allocate(appAttemptId1,ask2,emptyId,Collections.singletonList("rack0"),null);
fs.handle(new NodeUpdateSchedulerEvent(n1));
Allocation allocation3=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation3",0,allocation3.getContainers().size());
fs.handle(new NodeUpdateSchedulerEvent(n2));
Allocation allocation4=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation4",0,allocation4.getContainers().size());
fs.handle(new NodeUpdateSchedulerEvent(n3));
Allocation allocation5=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation5",0,allocation5.getContainers().size());
fs.handle(new NodeUpdateSchedulerEvent(n4));
Allocation allocation6=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("allocation6",1,allocation6.getContainers().size());
containerList=allocation6.getContainers();
for ( Container container : containerList) {
Assert.assertEquals("Container is allocated on n4",container.getNodeId(),n4.getNodeID());
}
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * End-to-end scheduling flow over MockRM: two NMs (6 GB and 4 GB), two apps
 * with 2 GB AMs, container requests from both AMs, and used/available
 * resource bookkeeping after a container completes.
 */
@Test public void test() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
MockRM rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("127.0.0.1:1234",6 * GB);
MockNM nm2=rm.registerNode("127.0.0.2:5678",4 * GB);
// App 1: 2 GB AM launched via nm1's heartbeat.
RMApp app1=rm.submitApp(2048);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
SchedulerNodeReport report_nm1=rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
Assert.assertEquals(2 * GB,report_nm1.getUsedResource().getMemory());
// App 2: 2 GB AM launched via nm2's heartbeat.
RMApp app2=rm.submitApp(2048);
nm2.nodeHeartbeat(true);
RMAppAttempt attempt2=app2.getCurrentAppAttempt();
MockAM am2=rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
SchedulerNodeReport report_nm2=rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
Assert.assertEquals(2 * GB,report_nm2.getUsedResource().getMemory());
// am1 requests 1 GB, am2 requests 3 GB; poll until both are allocated.
am1.addRequests(new String[]{"127.0.0.1","127.0.0.2"},GB,1,1);
AllocateResponse alloc1Response=am1.schedule();
am2.addRequests(new String[]{"127.0.0.1","127.0.0.2"},3 * GB,0,1);
AllocateResponse alloc2Response=am2.schedule();
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(1000);
alloc1Response=am1.schedule();
}
while (alloc2Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 2...");
Thread.sleep(1000);
alloc2Response=am2.schedule();
}
nm2.nodeHeartbeat(true);
// Both new containers land on nm1, filling it: 2 (AM1) + 1 + 3 = 6 GB used.
List allocated1=alloc1Response.getAllocatedContainers();
Assert.assertEquals(1,allocated1.size());
Assert.assertEquals(1 * GB,allocated1.get(0).getResource().getMemory());
Assert.assertEquals(nm1.getNodeId(),allocated1.get(0).getNodeId());
List allocated2=alloc2Response.getAllocatedContainers();
Assert.assertEquals(1,allocated2.size());
Assert.assertEquals(3 * GB,allocated2.get(0).getResource().getMemory());
Assert.assertEquals(nm1.getNodeId(),allocated2.get(0).getNodeId());
report_nm1=rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
report_nm2=rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
Assert.assertEquals(0,report_nm1.getAvailableResource().getMemory());
Assert.assertEquals(2 * GB,report_nm2.getAvailableResource().getMemory());
Assert.assertEquals(6 * GB,report_nm1.getUsedResource().getMemory());
Assert.assertEquals(2 * GB,report_nm2.getUsedResource().getMemory());
Container c1=allocated1.get(0);
Assert.assertEquals(GB,c1.getResource().getMemory());
// Report app1's 1 GB container as complete, then wait (bounded) for the RM
// to record it as just-finished.
ContainerStatus containerStatus=BuilderUtils.newContainerStatus(c1.getId(),ContainerState.COMPLETE,"",0);
nm1.containerStatus(containerStatus);
int waitCount=0;
while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
LOG.info("Waiting for containers to be finished for app 1... Tried " + waitCount + " times already..");
Thread.sleep(1000);
}
Assert.assertEquals(1,attempt1.getJustFinishedContainers().size());
Assert.assertEquals(1,am1.schedule().getCompletedContainersStatuses().size());
// nm1 usage drops by the completed 1 GB container: 6 GB -> 5 GB.
report_nm1=rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
Assert.assertEquals(5 * GB,report_nm1.getUsedResource().getMemory());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Headroom reporting of the FifoScheduler: on a single 4 GB node, app1 asks
 * for 1 GB and app2 for 2 GB; after one node update both allocations report
 * 1 GB headroom (matching the 4 - 1 - 2 GB of remaining capacity).
 */
@Test(timeout=50000) public void testHeadroom() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler();
// Single 4 GB node.
RMNode n1=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,"127.0.0.2");
fs.handle(new NodeAddedSchedulerEvent(n1));
// Register two applications with one attempt each.
ApplicationId appId1=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(appId1,1);
SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId1,"queue","user");
fs.handle(appEvent);
SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId1,false);
fs.handle(attemptEvent);
ApplicationId appId2=BuilderUtils.newApplicationId(200,2);
ApplicationAttemptId appAttemptId2=BuilderUtils.newApplicationAttemptId(appId2,1);
SchedulerEvent appEvent2=new AppAddedSchedulerEvent(appId2,"queue","user");
fs.handle(appEvent2);
SchedulerEvent attemptEvent2=new AppAttemptAddedSchedulerEvent(appAttemptId2,false);
fs.handle(attemptEvent2);
List emptyId=new ArrayList();
List emptyAsk=new ArrayList();
// app1 asks for 1 GB, app2 for 2 GB; a single node update serves both.
List ask1=new ArrayList();
ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1));
fs.allocate(appAttemptId1,ask1,emptyId,null,null);
List ask2=new ArrayList();
ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(2 * GB,1),1));
fs.allocate(appAttemptId2,ask2,emptyId,null,null);
fs.handle(new NodeUpdateSchedulerEvent(n1));
// Both apps see the same 1 GB of remaining headroom.
Allocation allocation1=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null);
Assert.assertEquals("Allocation headroom",1 * GB,allocation1.getResourceLimit().getMemory());
Allocation allocation2=fs.allocate(appAttemptId2,emptyAsk,emptyId,null,null);
Assert.assertEquals("Allocation headroom",1 * GB,allocation2.getResourceLimit().getMemory());
rm.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * A node that is removed and re-added with a smaller resource must have the
 * scheduler's available-memory metric reflect its new capacity.
 */
@Test(timeout=50000) public void testReconnectedNode() throws Exception {
  CapacitySchedulerConfiguration schedulerConf = new CapacitySchedulerConfiguration();
  schedulerConf.setQueues("default", new String[]{"default"});
  schedulerConf.setCapacity("default", 100);
  FifoScheduler scheduler = new FifoScheduler();
  scheduler.init(schedulerConf);
  scheduler.start();
  RMContext rmContext = mock(RMContext.class);
  scheduler.reinitialize(schedulerConf, null);
  scheduler.setRMContext(rmContext);
  RMNode nodeOne = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  RMNode nodeTwo = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2, "127.0.0.3");
  scheduler.handle(new NodeAddedSchedulerEvent(nodeOne));
  scheduler.handle(new NodeAddedSchedulerEvent(nodeTwo));
  scheduler.handle(new NodeUpdateSchedulerEvent(nodeOne));
  // 4 GB + 2 GB registered in total.
  Assert.assertEquals(6 * GB, scheduler.getRootQueueMetrics().getAvailableMB());
  // Reconnect the first node, now with only 2 GB.
  nodeOne = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1, "127.0.0.2");
  scheduler.handle(new NodeRemovedSchedulerEvent(nodeOne));
  scheduler.handle(new NodeAddedSchedulerEvent(nodeOne));
  scheduler.handle(new NodeUpdateSchedulerEvent(nodeOne));
  Assert.assertEquals(4 * GB, scheduler.getRootQueueMetrics().getAvailableMB());
  scheduler.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * Killing an application that was only in the NEW state on the previous
 * active RM must fail after failover with ApplicationNotFoundException.
 */
@Test(timeout=20000) public void testKillAppWhenFailoverHappensAtNewState() throws Exception {
  startRMsWithCustomizedRMAppManager();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  int maxAttempts = configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  RMApp app0 = rm1.submitApp(200, "",
      UserGroupInformation.getCurrentUser().getShortUserName(), null, false, null,
      maxAttempts, null, null, false, false);
  try {
    failOverAndKillApp(app0.getApplicationId(), RMAppState.NEW);
    fail("Should get an exception here");
  } catch (ApplicationNotFoundException ex) {
    // The new active RM has no record of the NEW-state app.
    Assert.assertTrue(ex.getMessage().contains(
        "Trying to kill an absent application " + app0.getApplicationId()));
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Moving an application across queues after it has been killed must be
 * rejected with a YarnException carrying the exact error message.
 */
@Test(timeout=10000) public void testMoveTooLate() throws Exception {
  Application app = new Application("user1", resourceManager);
  ApplicationId appId = app.getApplicationId();
  app.submit();
  ClientRMService clientRMService = resourceManager.getClientRMService();
  clientRMService.forceKillApplication(KillApplicationRequest.newInstance(appId));
  RMApp rmApp = resourceManager.getRMContext().getRMApps().get(appId);
  // Poll until the kill has fully taken effect (test-level timeout bounds this).
  while (rmApp.getState() != RMAppState.KILLED) {
    Thread.sleep(100);
  }
  try {
    clientRMService.moveApplicationAcrossQueues(
        MoveApplicationAcrossQueuesRequest.newInstance(appId, "newqueue"));
    fail("Should have hit exception");
  } catch (YarnException ex) {
    assertEquals(YarnException.class, ex.getClass());
    assertEquals("App in KILLED state cannot be moved.", ex.getMessage());
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Moving an application to another queue succeeds and the RMApp reports the
 * new queue name afterwards.
 */
@Test(timeout=10000) public void testMoveSuccessful() throws Exception {
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  RMApp submittedApp = rm1.submitApp(1024);
  ClientRMService service = rm1.getClientRMService();
  service.moveApplicationAcrossQueues(
      MoveApplicationAcrossQueuesRequest.newInstance(submittedApp.getApplicationId(), "newqueue"));
  RMApp movedApp = rm1.getRMContext().getRMApps().get(submittedApp.getApplicationId());
  assertEquals("newqueue", movedApp.getQueue());
  rm1.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Validate killing an application while it is in the ACCEPTED state.
 * A custom dispatcher drops RMAppAttemptEventType.KILL events so the app
 * stays in KILLING until this test injects ATTEMPT_KILLED itself, allowing
 * assertions on both the intermediate KILLING state and the final KILLED
 * state plus the queue metrics.
 * @throws Exception exception
 */
@Test(timeout=60000) public void testApplicationKillAtAcceptedState() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
// Dispatcher whose (spied) event handler silently ignores attempt-level KILL events.
final Dispatcher dispatcher=new AsyncDispatcher(){
@Override public EventHandler getEventHandler(){
class EventArgMatcher extends ArgumentMatcher {
@Override public boolean matches( Object argument){
if (argument instanceof RMAppAttemptEvent) {
if (((RMAppAttemptEvent)argument).getType().equals(RMAppAttemptEventType.KILL)) {
return true;
}
}
return false;
}
}
EventHandler handler=spy(super.getEventHandler());
doNothing().when(handler).handle(argThat(new EventArgMatcher()));
return handler;
}
}
;
MockRM rm=new MockRM(conf){
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
}
;
// Snapshot the metrics so the assertions below are relative, not absolute.
QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics();
int appsKilled=metrics.getAppsKilled();
int appsSubmitted=metrics.getAppsSubmitted();
rm.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm.getResourceTrackerService());
nm1.registerNode();
RMApp application=rm.submitApp(200);
MockAM am=MockRM.launchAM(application,rm,nm1);
am.waitForState(RMAppAttemptState.LAUNCHED);
nm1.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.RUNNING);
rm.waitForState(application.getApplicationId(),RMAppState.ACCEPTED);
// Kill while ACCEPTED; the suppressed KILL event leaves the app in KILLING.
KillApplicationRequest request=KillApplicationRequest.newInstance(application.getApplicationId());
rm.getClientRMService().forceKillApplication(request);
am.registerAppAttempt(false);
rm.waitForState(application.getApplicationId(),RMAppState.KILLING);
rm.waitForState(am.getApplicationAttemptId(),RMAppAttemptState.RUNNING);
// Manually deliver the event the dispatcher suppressed to complete the kill.
rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_KILLED));
rm.waitForState(application.getApplicationId(),RMAppState.KILLED);
metrics=rm.getResourceScheduler().getRootQueueMetrics();
Assert.assertEquals(appsKilled + 1,metrics.getAppsKilled());
Assert.assertEquals(appsSubmitted + 1,metrics.getAppsSubmitted());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * After an AM fails and the app returns to ACCEPTED awaiting a new attempt,
 * the application report must show an invalidated AM host/port
 * ("N/A" / -1) rather than pointing at the dead attempt.
 */
@Test(timeout=60000) public void testInvalidatedAMHostPortOnAMRestart() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
    nm1.registerNode();
    RMApp app2 = rm1.submitApp(200);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
    // Report the AM container as complete so the attempt fails.
    nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    am2.waitForState(RMAppAttemptState.FAILED);
    rm1.waitForState(app2.getApplicationId(), RMAppState.ACCEPTED);
    GetApplicationReportRequest request1 =
        GetApplicationReportRequest.newInstance(app2.getApplicationId());
    ApplicationReport report1 =
        rm1.getClientRMService().getApplicationReport(request1).getApplicationReport();
    // Host/port must be reset while no live AM is registered.
    Assert.assertEquals("N/A", report1.getHost());
    Assert.assertEquals(-1, report1.getRpcPort());
  } finally {
    // Fix: the original never stopped the RM, leaking its services/threads
    // across tests; sibling tests consistently call stop().
    rm1.stop();
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * AM host/port in application reports must be invalidated ("N/A" / -1) for
 * apps whose AM failed or was killed, while a successfully finished app
 * keeps its real AM host/port.
 */
@Test(timeout=80000) public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MockRM rm1 = new MockRM(conf);
  rm1.start();
  try {
    // App 1: finishes normally.
    RMApp app1 = rm1.submitApp(200);
    MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
    nm1.registerNode();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);
    // App 2: AM container completes unexpectedly -> attempt FAILED -> app
    // FAILED (max attempts is 1).
    RMApp app2 = rm1.submitApp(200);
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
    nm1.nodeHeartbeat(am2.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
    am2.waitForState(RMAppAttemptState.FAILED);
    rm1.waitForState(app2.getApplicationId(), RMAppState.FAILED);
    // App 3: killed by the client.
    RMApp app3 = rm1.submitApp(200);
    MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, nm1);
    rm1.killApp(app3.getApplicationId());
    rm1.waitForState(app3.getApplicationId(), RMAppState.KILLED);
    rm1.waitForState(am3.getApplicationAttemptId(), RMAppAttemptState.KILLED);
    GetApplicationsRequest request1 = GetApplicationsRequest.newInstance(
        EnumSet.of(YarnApplicationState.FINISHED, YarnApplicationState.KILLED,
            YarnApplicationState.FAILED));
    GetApplicationsResponse response1 = rm1.getClientRMService().getApplications(request1);
    List appList1 = response1.getApplicationList();
    Assert.assertEquals(3, appList1.size());
    for ( ApplicationReport report : appList1) {
      // Failed/killed apps must have host/port invalidated.
      if (report.getApplicationId().equals(app2.getApplicationId())
          || report.getApplicationId().equals(app3.getApplicationId())) {
        Assert.assertEquals("N/A", report.getHost());
        Assert.assertEquals(-1, report.getRpcPort());
      }
      // The finished app keeps its real AM host/port.
      if (report.getApplicationId().equals(app1.getApplicationId())) {
        Assert.assertFalse(report.getHost().equals("N/A"));
        Assert.assertTrue(report.getRpcPort() != -1);
      }
    }
  } finally {
    // Fix: the original never stopped the RM, leaking its services/threads
    // across tests; sibling tests consistently call stop().
    rm1.stop();
  }
}
InternalCallVerifier EqualityVerifier
/**
 * An app obtains containers from two nodes. With
 * yarn.scheduler.capacity.node-locality-delay set to -1, thirteen 1000 MB
 * containers are requested on h1: nm1 (5120 MB, minus the 2000 MB AM)
 * serves the first 3, and nm2 heartbeats serve 10 more.
 */
@Test(timeout=30000) public void testAppOnMultiNode() throws Exception {
Logger rootLogger=LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
YarnConfiguration conf=new YarnConfiguration();
conf.set("yarn.scheduler.capacity.node-locality-delay","-1");
MockRM rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("h1:1234",5120);
MockNM nm2=rm.registerNode("h2:5678",10240);
RMApp app=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
// Request 13 containers of 1000 MB each on h1.
int request=13;
am.allocate("h1",1000,request,new ArrayList());
// Heartbeat nm1 until its remaining capacity yields 3 containers.
List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
int contReceived=conts.size();
while (contReceived < 3) {
nm1.nodeHeartbeat(true);
conts.addAll(am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
contReceived=conts.size();
LOG.info("Got " + contReceived + " containers. Waiting to get "+ 3);
Thread.sleep(WAIT_SLEEP_MS);
}
Assert.assertEquals(3,conts.size());
// The remaining requests are satisfied by nm2's heartbeats.
conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
contReceived=conts.size();
while (contReceived < 10) {
nm2.nodeHeartbeat(true);
conts.addAll(am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
contReceived=conts.size();
LOG.info("Got " + contReceived + " containers. Waiting to get "+ 10);
Thread.sleep(WAIT_SLEEP_MS);
}
Assert.assertEquals(10,conts.size());
// Finish the attempt cleanly.
am.unregisterAppAttempt();
nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FINISHED);
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * When the AM container's id is not 1 (a container id was burned first), no
 * NM token is present for the attempt/node before AM registration, and the
 * first normal container allocation carries an NM token naming the
 * allocating node.
 */
@Test(timeout=20000) public void testNMTokenSentForNormalContainer() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getCanonicalName());
  MockRM rm = new MockRM(conf);
  rm.start();
  try {
    MockNM nm1 = rm.registerNode("h1:1234", 5120);
    RMApp app = rm.submitApp(2000);
    RMAppAttempt attempt = app.getCurrentAppAttempt();
    CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
    // Burn a container id so the AM container does not get id 1.
    cs.getApplicationAttempt(attempt.getAppAttemptId()).getNewContainerId();
    nm1.nodeHeartbeat(true);
    MockAM am = MockRM.launchAM(app, rm, nm1);
    Assert.assertTrue(attempt.getMasterContainer().getId().getId() != 1);
    // No NM token should have been issued for the AM container's node yet.
    Assert.assertFalse(rm.getRMContext().getNMTokenSecretManager()
        .isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(), nm1.getNodeId()));
    am.registerAppAttempt();
    rm.waitForState(app.getApplicationId(), RMAppState.RUNNING);
    int NUM_CONTAINERS = 1;
    List containers = new ArrayList();
    List expectedNMTokens = new ArrayList();
    // Poll until the requested normal container is allocated, collecting the
    // NM tokens returned alongside it (test-level timeout bounds this loop).
    while (true) {
      AllocateResponse response = am.allocate("127.0.0.1", 2000, NUM_CONTAINERS, new ArrayList());
      nm1.nodeHeartbeat(true);
      containers.addAll(response.getAllocatedContainers());
      expectedNMTokens.addAll(response.getNMTokens());
      if (containers.size() == NUM_CONTAINERS) {
        break;
      }
      Thread.sleep(200);
      System.out.println("Waiting for container to be allocated.");
    }
    // The NM token accompanying the normal container names the allocating node.
    NodeId nodeId = expectedNMTokens.get(0).getNodeId();
    Assert.assertEquals(nm1.getNodeId(), nodeId);
  } finally {
    // Fix: the original never stopped the RM, leaking its services/threads
    // across tests; sibling tests consistently call stop().
    rm.stop();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Lifecycle of NM tokens handed to an AM: a token is issued once per node,
 * is not re-issued for repeated allocations on the same node, is cleared
 * when the node re-registers, is invalidated by a master-key roll (and then
 * re-issued on the next allocation), and the whole attempt is dropped from
 * the NMTokenSecretManager when the app finishes.
 */
@Test(timeout=40000) public void testNMToken() throws Exception {
MockRM rm=new MockRM();
try {
rm.start();
MockNM nm1=rm.registerNode("h1:1234",10000);
NMTokenSecretManagerInRM nmTokenSecretManager=rm.getRMContext().getNMTokenSecretManager();
RMApp app=rm.submitApp(1000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
// The attempt is registered with the secret manager as soon as the AM launches.
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
am.registerAppAttempt();
ArrayList containersReceivedForNM1=new ArrayList();
List releaseContainerList=new ArrayList();
HashMap nmTokens=new HashMap();
// Two rounds of allocations on h1: the NM token for nm1 is issued only once.
AllocateResponse response=am.allocate("h1",1000,2,releaseContainerList);
Assert.assertEquals(0,response.getAllocatedContainers().size());
allocateContainersAndValidateNMTokens(am,containersReceivedForNM1,2,nmTokens,nm1);
Assert.assertEquals(1,nmTokens.size());
response=am.allocate("h1",1000,2,releaseContainerList);
Assert.assertEquals(0,response.getAllocatedContainers().size());
allocateContainersAndValidateNMTokens(am,containersReceivedForNM1,4,nmTokens,nm1);
Assert.assertEquals(1,nmTokens.size());
// A second node yields a second token.
MockNM nm2=rm.registerNode("h2:1234",10000);
nm2.nodeHeartbeat(true);
ArrayList containersReceivedForNM2=new ArrayList();
response=am.allocate("h2",1000,2,releaseContainerList);
Assert.assertEquals(0,response.getAllocatedContainers().size());
allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,2,nmTokens,nm2);
Assert.assertEquals(2,nmTokens.size());
// Re-register nm2 (NOTE(review): models an NM restart — confirm) and wait
// for its heartbeat state to reset and the attempt's nm2 token to be cleared.
nm2=rm.registerNode("h2:1234",10000);
Map nodes=rm.getRMContext().getRMNodes();
while (nodes.get(nm2.getNodeId()).getLastNodeHeartBeatResponse().getResponseId() > 0) {
Thread.sleep(WAIT_SLEEP_MS);
}
int interval=40;
while (nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId()) && interval-- > 0) {
LOG.info("waiting for nmToken to be cleared for : " + nm2.getNodeId());
Thread.sleep(WAIT_SLEEP_MS);
}
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
// Mirror the RM-side clearing in the local token map; the next allocation
// on nm2 must issue a fresh token.
nmTokens.remove(nm2.getNodeId().toString());
Assert.assertEquals(1,nmTokens.size());
response=am.allocate("h2",1000,2,releaseContainerList);
Assert.assertEquals(0,response.getAllocatedContainers().size());
allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,4,nmTokens,nm2);
Assert.assertEquals(2,nmTokens.size());
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId()));
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId()));
// Rolling and activating a new master key wipes all per-node tokens but
// keeps the attempt registered.
nmTokenSecretManager.rollMasterKey();
nmTokenSecretManager.activateNextMasterKey();
Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId()));
Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId()));
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
nmTokens.clear();
Assert.assertEquals(0,nmTokens.size());
// A new allocation after the key roll re-issues the token for nm2.
response=am.allocate("h2",1000,1,releaseContainerList);
Assert.assertEquals(0,response.getAllocatedContainers().size());
allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,5,nmTokens,nm2);
Assert.assertEquals(1,nmTokens.size());
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId()));
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
// Finish the app: complete all containers and the AM, then the attempt must
// be unregistered from the secret manager.
am.unregisterAppAttempt();
for ( Container container : containersReceivedForNM1) {
nm1.nodeHeartbeat(attempt.getAppAttemptId(),container.getId().getId(),ContainerState.COMPLETE);
}
for ( Container container : containersReceivedForNM2) {
nm2.nodeHeartbeat(attempt.getAppAttemptId(),container.getId().getId(),ContainerState.COMPLETE);
}
nm1.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FINISHED);
Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId()));
}
finally {
rm.stop();
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
@Test public void testAdminRefreshQueuesWithLocalConfigurationProvider() throws IOException, YarnException {
  // Bring up an RM with the default (local) configuration provider.
  rm = new MockRM(configuration);
  rm.init(configuration);
  rm.start();
  CapacityScheduler scheduler = (CapacityScheduler) rm.getRMContext().getScheduler();
  int initialMaxApps = scheduler.getConfiguration().getMaximumSystemApplications();
  try {
    // Refreshing queues against the local provider must succeed and leave
    // the scheduler's maximum-applications value unchanged.
    rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
    Assert.assertEquals(initialMaxApps,
        scheduler.getConfiguration().getMaximumSystemApplications());
  } catch (Exception ex) {
    fail("Using localConfigurationProvider. Should not get any exception.");
  }
}
APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Verifies that an RM configured with the file-system based configuration
 * provider picks up all remotely uploaded files (yarn-site, capacity-scheduler,
 * hadoop-policy, core-site, exclude-hosts) during initialization.
 * Fixes: JUnit assertEquals calls had expected/actual swapped, size checks
 * used assertTrue(x.size()==N), and collections used raw types.
 */
@Test public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception {
configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
// Exclude-hosts file with a single entry, uploaded to the remote FS.
final File excludeHostsFile=new File(tmpDir.toString(),"excludeHosts");
if (excludeHostsFile.exists()) {
excludeHostsFile.delete();
}
if (!excludeHostsFile.createNewFile()) {
Assert.fail("Can not create " + "excludeHosts");
}
PrintWriter fileWriter=new PrintWriter(excludeHostsFile);
fileWriter.write("0.0.0.0:123");
fileWriter.close();
uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));
// yarn-site.xml: admin ACL plus the exclude-hosts path.
YarnConfiguration yarnConf=new YarnConfiguration();
yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL,"world:anyone:rwcda");
yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,this.workingPath + "/excludeHosts");
uploadConfiguration(yarnConf,"yarn-site.xml");
// capacity-scheduler.xml: non-default maximum-applications.
CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
csConf.set("yarn.scheduler.capacity.maximum-applications","5000");
uploadConfiguration(csConf,"capacity-scheduler.xml");
// hadoop-policy.xml: service ACL for the application client protocol.
String aclsString="alice,bob users,wheel";
Configuration newConf=new Configuration();
newConf.set("security.applicationclient.protocol.acl",aclsString);
uploadConfiguration(newConf,"hadoop-policy.xml");
// core-site.xml: security authorization, proxy users, mock group mapping.
Configuration conf=new Configuration();
conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,true);
conf.set("hadoop.proxyuser.test.groups","test_groups");
conf.set("hadoop.proxyuser.test.hosts","test_hosts");
conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MockUnixGroupsMapping.class,GroupMappingServiceProvider.class);
uploadConfiguration(conf,"core-site.xml");
MockUnixGroupsMapping.updateGroups();
ResourceManager resourceManager=null;
try {
try {
resourceManager=new ResourceManager();
resourceManager.init(configuration);
resourceManager.start();
}
 catch ( Exception ex) {
fail("Should not get any exceptions");
}
// Exclude-hosts file must have been loaded at init time.
Set<String> excludeHosts=resourceManager.getRMContext().getNodesListManager().getHostsReader().getExcludedHosts();
Assert.assertEquals(1,excludeHosts.size());
Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));
// Admin ACL must come from the uploaded yarn-site.xml.
String aclStringAfter=resourceManager.adminService.getAccessControlList().getAclString().trim();
Assert.assertEquals("world:anyone:rwcda",aclStringAfter);
// Scheduler must reflect the uploaded capacity-scheduler.xml.
CapacityScheduler cs=(CapacityScheduler)resourceManager.getRMContext().getScheduler();
int maxAppsAfter=cs.getConfiguration().getMaximumSystemApplications();
Assert.assertEquals(5000,maxAppsAfter);
// The service ACL must be visible on every RPC server the RM exposes.
ServiceAuthorizationManager adminServiceServiceManager=resourceManager.adminService.getServer().getServiceAuthorizationManager();
verifyServiceACLsRefresh(adminServiceServiceManager,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString);
ServiceAuthorizationManager clientRMServiceServiceManager=resourceManager.getRMContext().getClientRMService().getServer().getServiceAuthorizationManager();
verifyServiceACLsRefresh(clientRMServiceServiceManager,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString);
ServiceAuthorizationManager appMasterService=resourceManager.getRMContext().getApplicationMasterService().getServer().getServiceAuthorizationManager();
verifyServiceACLsRefresh(appMasterService,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString);
ServiceAuthorizationManager rtService=resourceManager.getRMContext().getResourceTrackerService().getServer().getServiceAuthorizationManager();
verifyServiceACLsRefresh(rtService,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString);
// Proxy-user settings from the uploaded core-site.xml.
Assert.assertEquals(1,ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").size());
Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups"));
Assert.assertEquals(1,ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").size());
Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts"));
// Group mapping must reflect MockUnixGroupsMapping.updateGroups().
List<String> groupAfter=Groups.getUserToGroupsMappingService(configuration).getGroups(UserGroupInformation.getCurrentUser().getUserName());
Assert.assertTrue(groupAfter.contains("test_group_D") && groupAfter.contains("test_group_E") && groupAfter.contains("test_group_F")&& groupAfter.size() == 3);
}
  finally {
if (resourceManager != null) {
resourceManager.stop();
}
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * HA pair with the file-system based configuration provider: a queue refresh
 * on the active RM must not affect the standby until fail-over, after which
 * the newly active RM must load the refreshed configuration.
 * Fixes: rm2 was initialized with conf1 (copy-paste bug) instead of its own
 * conf2; assertEquals expected/actual were swapped; state checks used
 * assertTrue(x == y) instead of assertEquals.
 */
@Test public void testRMHAWithFileSystemBasedConfiguration() throws IOException, YarnException {
  StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
  configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  configuration.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2");
  // Give each service address a distinct port for each RM id.
  int base=100;
  for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(configuration)) {
    configuration.set(HAUtil.addSuffix(confKey,"rm1"),"0.0.0.0:" + (base + 20));
    configuration.set(HAUtil.addSuffix(confKey,"rm2"),"0.0.0.0:" + (base + 40));
    base=base * 2;
  }
  Configuration conf1=new Configuration(configuration);
  conf1.set(YarnConfiguration.RM_HA_ID,"rm1");
  Configuration conf2=new Configuration(configuration);
  conf2.set(YarnConfiguration.RM_HA_ID,"rm2");
  uploadDefaultConfiguration();
  MockRM rm1=null;
  MockRM rm2=null;
  try {
    rm1=new MockRM(conf1);
    rm1.init(conf1);
    rm1.start();
    Assert.assertEquals(HAServiceState.STANDBY,rm1.getRMContext().getHAServiceState());
    rm2=new MockRM(conf2);
    // FIX: previously rm2.init(conf1) — rm2 must use its own configuration.
    rm2.init(conf2);
    rm2.start();
    Assert.assertEquals(HAServiceState.STANDBY,rm2.getRMContext().getHAServiceState());
    rm1.adminService.transitionToActive(requestInfo);
    Assert.assertEquals(HAServiceState.ACTIVE,rm1.getRMContext().getHAServiceState());
    // Upload a new capacity-scheduler.xml and refresh only the active RM.
    CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
    csConf.set("yarn.scheduler.capacity.maximum-applications","5000");
    uploadConfiguration(csConf,"capacity-scheduler.xml");
    rm1.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
    int maxApps=((CapacityScheduler)rm1.getRMContext().getScheduler()).getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(5000,maxApps);
    // The standby RM must still see its previous value (10000).
    int maxAppsBeforeFailOver=((CapacityScheduler)rm2.getRMContext().getScheduler()).getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(10000,maxAppsBeforeFailOver);
    // Fail over: rm2 should load the refreshed configuration on activation.
    rm1.adminService.transitionToStandby(requestInfo);
    rm2.adminService.transitionToActive(requestInfo);
    Assert.assertEquals(HAServiceState.STANDBY,rm1.getRMContext().getHAServiceState());
    Assert.assertEquals(HAServiceState.ACTIVE,rm2.getRMContext().getHAServiceState());
    int maxAppsAfter=((CapacityScheduler)rm2.getRMContext().getScheduler()).getConfiguration().getMaximumSystemApplications();
    Assert.assertEquals(5000,maxAppsAfter);
  }
  finally {
    if (rm1 != null) {
      rm1.stop();
    }
    if (rm2 != null) {
      rm2.stop();
    }
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * refreshAdminAcls must reload the admin ACL from the remotely uploaded
 * yarn-site.xml when the file-system based configuration provider is used.
 * Fixes: assertEquals expected/actual were swapped; assertTrue(!a.equals(b))
 * replaced with the clearer assertFalse.
 */
@Test public void testAdminAclsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm=new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  }
  catch ( Exception ex) {
    fail("Should not get any exceptions");
  }
  String aclStringBefore=rm.adminService.getAccessControlList().getAclString().trim();
  // Upload a yarn-site.xml carrying a new admin ACL, then refresh.
  YarnConfiguration yarnConf=new YarnConfiguration();
  yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL,"world:anyone:rwcda");
  uploadConfiguration(yarnConf,"yarn-site.xml");
  rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance());
  String aclStringAfter=rm.adminService.getAccessControlList().getAclString().trim();
  Assert.assertFalse(aclStringAfter.equals(aclStringBefore));
  Assert.assertEquals("world:anyone:rwcda",aclStringAfter);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * refreshUserToGroupsMappings must pick up changes from the mock group
 * mapping uploaded via core-site.xml (A/B/C before refresh, D/E/F after).
 * Fixes: raw generic types (List/ArrayList) replaced with parameterized ones.
 */
@Test public void testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  String[] defaultTestUserGroups={"dummy_group1","dummy_group2"};
  UserGroupInformation ugi=UserGroupInformation.createUserForTesting("dummyUser",defaultTestUserGroups);
  String user=ugi.getUserName();
  // Snapshot the groups the user was created with.
  List<String> groupWithInit=new ArrayList<String>(2);
  for (int i=0; i < ugi.getGroupNames().length; i++) {
    groupWithInit.add(ugi.getGroupNames()[i]);
  }
  uploadDefaultConfiguration();
  // core-site.xml: install the mock mapping service.
  Configuration conf=new Configuration();
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MockUnixGroupsMapping.class,GroupMappingServiceProvider.class);
  uploadConfiguration(conf,"core-site.xml");
  try {
    rm=new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  }
  catch ( Exception ex) {
    fail("Should not get any exceptions");
  }
  // The mock mapping initially reports test_group_A/B/C, which must differ
  // from the groups assigned at user-creation time.
  List<String> groupBefore=new ArrayList<String>(Groups.getUserToGroupsMappingService(configuration).getGroups(user));
  Assert.assertTrue(groupBefore.contains("test_group_A") && groupBefore.contains("test_group_B") && groupBefore.contains("test_group_C")&& groupBefore.size() == 3);
  Assert.assertTrue(groupWithInit.size() != groupBefore.size());
  Assert.assertFalse(groupWithInit.contains("test_group_A") || groupWithInit.contains("test_group_B") || groupWithInit.contains("test_group_C"));
  // Flip the mock to D/E/F and refresh through the admin service.
  MockUnixGroupsMapping.updateGroups();
  rm.adminService.refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequest.newInstance());
  List<String> groupAfter=Groups.getUserToGroupsMappingService(configuration).getGroups(user);
  Assert.assertTrue(groupAfter.contains("test_group_D") && groupAfter.contains("test_group_E") && groupAfter.contains("test_group_F")&& groupAfter.size() == 3);
}
BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * refreshNodes must reload the exclude-hosts file named by the remotely
 * uploaded yarn-site.xml.
 * Fixes: assertTrue(x.size()==1) replaced with assertEquals (better failure
 * message); raw Set replaced with Set&lt;String&gt;.
 */
@Test public void testRefreshNodesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm=new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  }
  catch ( Exception ex) {
    fail("Should not get any exceptions");
  }
  // Create the exclude-hosts file with a single entry and upload it.
  final File excludeHostsFile=new File(tmpDir.toString(),"excludeHosts");
  if (excludeHostsFile.exists()) {
    excludeHostsFile.delete();
  }
  if (!excludeHostsFile.createNewFile()) {
    Assert.fail("Can not create " + "excludeHosts");
  }
  PrintWriter fileWriter=new PrintWriter(excludeHostsFile);
  fileWriter.write("0.0.0.0:123");
  fileWriter.close();
  uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath()));
  // Point yarn-site.xml at the uploaded exclude file, then refresh.
  Configuration yarnConf=new YarnConfiguration();
  yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,this.workingPath + "/excludeHosts");
  uploadConfiguration(yarnConf,YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
  rm.adminService.refreshNodes(RefreshNodesRequest.newInstance());
  Set<String> excludeHosts=rm.getNodesListManager().getHostsReader().getExcludedHosts();
  Assert.assertEquals(1,excludeHosts.size());
  Assert.assertTrue(excludeHosts.contains("0.0.0.0:123"));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * refreshQueues must load the remotely uploaded capacity-scheduler.xml and
 * change the scheduler's maximum-applications value.
 * Fix: assertEquals expected/actual were swapped (JUnit expects the expected
 * value first, otherwise failure messages are inverted).
 */
@Test public void testAdminRefreshQueuesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException {
  configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
  uploadDefaultConfiguration();
  try {
    rm=new MockRM(configuration);
    rm.init(configuration);
    rm.start();
  }
  catch ( Exception ex) {
    fail("Should not get any exceptions");
  }
  CapacityScheduler cs=(CapacityScheduler)rm.getRMContext().getScheduler();
  int maxAppsBefore=cs.getConfiguration().getMaximumSystemApplications();
  // Upload a capacity-scheduler.xml with a different maximum and refresh.
  CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
  csConf.set("yarn.scheduler.capacity.maximum-applications","5000");
  uploadConfiguration(csConf,"capacity-scheduler.xml");
  rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
  int maxAppsAfter=cs.getConfiguration().getMaximumSystemApplications();
  Assert.assertEquals(5000,maxAppsAfter);
  Assert.assertTrue(maxAppsAfter != maxAppsBefore);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that repeated HA transitions neither add nor drop dispatcher
 * event handlers or RM services: the counts captured right after init()
 * must be unchanged after several standby/active cycles, and the dispatcher
 * that served an active period is stopped when the RM goes standby.
 */
@Test public void testRMDispatcherForHA() throws IOException {
String errorMessageForEventHandler="Expect to get the same number of handlers";
String errorMessageForService="Expect to get the same number of services";
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
// Use a counting dispatcher so handler registrations can be compared.
rm=new MockRM(conf){
@Override protected Dispatcher createDispatcher(){
return new MyCountingDispatcher();
}
}
;
rm.init(conf);
// Baseline counts captured immediately after init().
int expectedEventHandlerCount=((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount();
int expectedServiceCount=rm.getServices().size();
assertTrue(expectedEventHandlerCount != 0);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start() the RM reports INITIALIZING and is not ready for active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
rm.start();
// Cycle through several transitions, ending in standby.
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
rm.adminService.transitionToActive(requestInfo);
rm.adminService.transitionToStandby(requestInfo);
MyCountingDispatcher dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
// While standby, the current dispatcher instance is still running.
assertTrue(!dispatcher.isStopped());
rm.adminService.transitionToActive(requestInfo);
// Going active must not change handler or service counts.
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
// Remember the dispatcher that served this active period ...
dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher();
rm.adminService.transitionToStandby(requestInfo);
assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount());
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size());
// ... it must be stopped once the RM has transitioned back to standby.
assertTrue(dispatcher.isStopped());
rm.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test to verify the following RM HA transitions to the following states.
 * 1. Standby: Should be a no-op
 * 2. Active: Active services should start
 * 3. Active: Should be a no-op.
 * While active, submit a couple of jobs
 * 4. Standby: Active services should stop
 * 5. Active: Active services should start
 * 6. Stop the RM: All services should stop and RM should not be ready to
 * become Active
 */
@Test(timeout=30000) public void testFailoverAndTransitions() throws Exception {
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
Configuration conf=new YarnConfiguration(configuration);
rm=new MockRM(conf);
rm.init(conf);
StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
// Before start(): INITIALIZING and not ready to become active.
assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive());
checkMonitorHealth();
rm.start();
checkMonitorHealth();
checkStandbyRMFunctionality();
// A standby RM reports all-zero cluster metrics.
verifyClusterMetrics(0,0,0,0,0,0);
// Step 1: Standby -> Standby is a no-op.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 2: Standby -> Active; active services start and metrics appear.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// Step 3: Active -> Active is a no-op for the transition itself; the
// expected metrics advance, which indicates checkActiveRMFunctionality
// exercises the active RM (presumably submits work — confirm in helper).
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,2,2,2,2048,2);
// Step 4: Active -> Standby; active services stop, metrics reset to zero.
rm.adminService.transitionToStandby(requestInfo);
checkMonitorHealth();
checkStandbyRMFunctionality();
verifyClusterMetrics(0,0,0,0,0,0);
// Step 5: Standby -> Active again; services restart cleanly.
rm.adminService.transitionToActive(requestInfo);
checkMonitorHealth();
checkActiveRMFunctionality();
verifyClusterMetrics(1,1,1,1,2048,1);
// Step 6: stopping the RM leaves it STOPPING and unable to become active.
rm.stop();
assertEquals(STATE_ERR,HAServiceState.STOPPING,rm.adminService.getServiceStatus().getState());
assertFalse("RM is ready to become active even after it is stopped",rm.adminService.getServiceStatus().isReadyToBecomeActive());
assertFalse("Active RM services are started",rm.areActiveServicesRunning());
checkMonitorHealth();
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * RM_HA_ID resolution: an explicitly configured id is kept through init();
 * when no usable id can be determined, initialization must fail with a
 * descriptive message.
 * Fix: assertEquals expected/actual were swapped.
 */
@Test public void testHAIDLookup(){
  // Case 1: id carried by the fixture configuration survives init().
  Configuration conf=new YarnConfiguration(configuration);
  rm=new MockRM(conf);
  rm.init(conf);
  assertEquals(RM2_NODE_ID,conf.get(YarnConfiguration.RM_HA_ID));
  // Case 2: explicitly selecting RM1 survives init() as well.
  configuration.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID);
  conf=new YarnConfiguration(configuration);
  rm=new MockRM(conf);
  rm.init(conf);
  assertEquals(RM1_NODE_ID,conf.get(YarnConfiguration.RM_HA_ID));
  // Case 3: RM_HA_ID unset and no valid id derivable from RM_HA_IDS —
  // init() must throw.
  configuration.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM3_NODE_ID);
  configuration.unset(YarnConfiguration.RM_HA_ID);
  conf=new YarnConfiguration(configuration);
  try {
    rm=new MockRM(conf);
    rm.init(conf);
    fail("Should get an exception here.");
  }
  catch ( Exception ex) {
    Assert.assertTrue(ex.getMessage().contains("Invalid configuration! Can not find valid RM_HA_ID."));
  }
}
InternalCallVerifier EqualityVerifier
@Test public void testRunningExpire(){
  // Expiring a RUNNING node moves it to LOST: one node shifts from the
  // active count to the lost count; all other metrics stay untouched.
  RMNodeImpl runningNode=getRunningNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  runningNode.handle(new RMNodeEvent(runningNode.getNodeID(),RMNodeEventType.EXPIRE));
  Assert.assertEquals("Active Nodes",activeBefore - 1,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore + 1,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.LOST,runningNode.getState());
}
InternalCallVerifier EqualityVerifier
@Test public void testUnhealthyRebooting(){
  // Rebooting an UNHEALTHY node moves it to REBOOTED: the unhealthy count
  // drops by one and the rebooted count grows by one.
  RMNodeImpl unhealthyNode=getUnhealthyNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  unhealthyNode.handle(new RMNodeEvent(unhealthyNode.getNodeID(),RMNodeEventType.REBOOTING));
  Assert.assertEquals("Active Nodes",activeBefore,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore - 1,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore + 1,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.REBOOTED,unhealthyNode.getState());
}
InternalCallVerifier EqualityVerifier
@Test public void testUnhealthyDecommission(){
  // Decommissioning an UNHEALTHY node: unhealthy count drops by one,
  // decommissioned count grows by one, final state is DECOMMISSIONED.
  RMNodeImpl unhealthyNode=getUnhealthyNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  unhealthyNode.handle(new RMNodeEvent(unhealthyNode.getNodeID(),RMNodeEventType.DECOMMISSION));
  Assert.assertEquals("Active Nodes",activeBefore,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore - 1,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore + 1,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.DECOMMISSIONED,unhealthyNode.getState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testReconnnectUpdate(){
  // A reconnect event must update the recorded NM version to that of the
  // reconnecting node.
  final String nmVersion1="nm version 1";
  final String nmVersion2="nm version 2";
  RMNodeImpl original=getRunningNode(nmVersion1);
  Assert.assertEquals(nmVersion1,original.getNodeManagerVersion());
  RMNodeImpl reconnecting=getRunningNode(nmVersion2);
  original.handle(new RMNodeReconnectEvent(original.getNodeID(),reconnecting,null));
  Assert.assertEquals(nmVersion2,original.getNodeManagerVersion());
}
InternalCallVerifier EqualityVerifier
@Test public void testUnhealthyExpire(){
  // Expiring an UNHEALTHY node moves it to LOST: the unhealthy count drops
  // by one and the lost count grows by one; active count is unaffected.
  RMNodeImpl unhealthyNode=getUnhealthyNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  unhealthyNode.handle(new RMNodeEvent(unhealthyNode.getNodeID(),RMNodeEventType.EXPIRE));
  Assert.assertEquals("Active Nodes",activeBefore,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore + 1,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore - 1,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.LOST,unhealthyNode.getState());
}
InternalCallVerifier EqualityVerifier
@Test public void testRunningRebooting(){
  // Rebooting a RUNNING node: active count drops by one, rebooted count
  // grows by one, final state is REBOOTED.
  RMNodeImpl runningNode=getRunningNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  runningNode.handle(new RMNodeEvent(runningNode.getNodeID(),RMNodeEventType.REBOOTING));
  Assert.assertEquals("Active Nodes",activeBefore - 1,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore + 1,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.REBOOTED,runningNode.getState());
}
InternalCallVerifier EqualityVerifier
@Test(timeout=20000) public void testUpdateHeartbeatResponseForCleanup(){
  // Queue one container and one application for cleanup, then check that
  // updateNodeHeartbeatResponseForCleanup drains both pending lists into
  // the heartbeat response.
  RMNodeImpl runningNode=getRunningNode();
  NodeId nodeId=runningNode.getNodeID();
  ContainerId containerToClean=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0);
  runningNode.handle(new RMNodeCleanContainerEvent(nodeId,containerToClean));
  Assert.assertEquals(1,runningNode.getContainersToCleanUp().size());
  ApplicationId appToClean=BuilderUtils.newApplicationId(0,1);
  runningNode.handle(new RMNodeCleanAppEvent(nodeId,appToClean));
  Assert.assertEquals(1,runningNode.getAppsToCleanup().size());
  // A plain status heartbeat must not clear the pending cleanup lists.
  RMNodeStatusEvent statusEvent=getMockRMNodeStatusEvent();
  runningNode.handle(statusEvent);
  Assert.assertEquals(1,runningNode.getContainersToCleanUp().size());
  Assert.assertEquals(1,runningNode.getAppsToCleanup().size());
  // Filling a heartbeat response drains both lists into it.
  NodeHeartbeatResponse hbrsp=Records.newRecord(NodeHeartbeatResponse.class);
  runningNode.updateNodeHeartbeatResponseForCleanup(hbrsp);
  Assert.assertEquals(0,runningNode.getContainersToCleanUp().size());
  Assert.assertEquals(0,runningNode.getAppsToCleanup().size());
  Assert.assertEquals(1,hbrsp.getContainersToCleanup().size());
  Assert.assertEquals(containerToClean,hbrsp.getContainersToCleanup().get(0));
  Assert.assertEquals(1,hbrsp.getApplicationsToCleanup().size());
  Assert.assertEquals(appToClean,hbrsp.getApplicationsToCleanup().get(0));
}
InternalCallVerifier EqualityVerifier
@Test public void testUnhealthyExpireForSchedulerRemove(){
  // Two NodeRemovedSchedulerEvents have already been delivered by the time
  // getUnhealthyNode() returns; expiring the unhealthy node must not send
  // another removal to the scheduler.
  RMNodeImpl unhealthyNode=getUnhealthyNode();
  verify(scheduler,times(2)).handle(any(NodeRemovedSchedulerEvent.class));
  unhealthyNode.handle(new RMNodeEvent(unhealthyNode.getNodeID(),RMNodeEventType.EXPIRE));
  verify(scheduler,times(2)).handle(any(NodeRemovedSchedulerEvent.class));
  Assert.assertEquals(NodeState.LOST,unhealthyNode.getState());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAdd(){
  // Starting a NEW node makes it RUNNING, bumps the active count by one and
  // emits a NODE_USABLE event to the nodes-list manager.
  RMNodeImpl newNode=getNewNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  newNode.handle(new RMNodeStartedEvent(newNode.getNodeID(),null,null));
  Assert.assertEquals("Active Nodes",activeBefore + 1,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.RUNNING,newNode.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,nodesListManagerEvent.getType());
}
InternalCallVerifier EqualityVerifier
// Verifies that node status updates queue up while nextHeartBeat is disabled
// (no extra scheduler node-updates are sent) and that the queue is emptied
// when the node expires.
@Test(timeout=5000) public void testStatusChange(){
node.handle(new RMNodeStartedEvent(null,null,null));
// Block heartbeat forwarding so subsequent status events accumulate.
node.setNextHeartBeat(false);
ContainerId completedContainerId1=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0);
ContainerId completedContainerId2=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(1,1),1),1);
RMNodeStatusEvent statusEvent1=getMockRMNodeStatusEvent();
RMNodeStatusEvent statusEvent2=getMockRMNodeStatusEvent();
ContainerStatus containerStatus1=mock(ContainerStatus.class);
ContainerStatus containerStatus2=mock(ContainerStatus.class);
// Each status event carries exactly one completed container.
doReturn(completedContainerId1).when(containerStatus1).getContainerId();
doReturn(Collections.singletonList(containerStatus1)).when(statusEvent1).getContainers();
doReturn(completedContainerId2).when(containerStatus2).getContainerId();
doReturn(Collections.singletonList(containerStatus2)).when(statusEvent2).getContainers();
// NOTE(review): exactly one node-update has already reached the scheduler
// at this point — presumably from the start event path; confirm in helper.
verify(scheduler,times(1)).handle(any(NodeUpdateSchedulerEvent.class));
node.handle(statusEvent1);
node.handle(statusEvent2);
// With nextHeartBeat disabled, no additional node-update is forwarded; the
// two events are queued on the node instead.
verify(scheduler,times(1)).handle(any(NodeUpdateSchedulerEvent.class));
Assert.assertEquals(2,node.getQueueSize());
// Expiring the node clears the queued updates.
node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.EXPIRE));
Assert.assertEquals(0,node.getQueueSize());
}
InternalCallVerifier EqualityVerifier
@Test public void testRunningDecommission(){
  // Decommissioning a RUNNING node: active count drops by one,
  // decommissioned count grows by one, final state is DECOMMISSIONED.
  RMNodeImpl runningNode=getRunningNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  runningNode.handle(new RMNodeEvent(runningNode.getNodeID(),RMNodeEventType.DECOMMISSION));
  Assert.assertEquals("Active Nodes",activeBefore - 1,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore + 1,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.DECOMMISSIONED,runningNode.getState());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testReconnect(){
  // A node reconnecting with itself changes no metric, stays RUNNING, and
  // signals NODE_USABLE to the nodes-list manager.
  RMNodeImpl runningNode=getRunningNode();
  ClusterMetrics metrics=ClusterMetrics.getMetrics();
  int activeBefore=metrics.getNumActiveNMs();
  int lostBefore=metrics.getNumLostNMs();
  int unhealthyBefore=metrics.getUnhealthyNMs();
  int decommissionedBefore=metrics.getNumDecommisionedNMs();
  int rebootedBefore=metrics.getNumRebootedNMs();
  runningNode.handle(new RMNodeReconnectEvent(runningNode.getNodeID(),runningNode,null));
  Assert.assertEquals("Active Nodes",activeBefore,metrics.getNumActiveNMs());
  Assert.assertEquals("Lost Nodes",lostBefore,metrics.getNumLostNMs());
  Assert.assertEquals("Unhealthy Nodes",unhealthyBefore,metrics.getUnhealthyNMs());
  Assert.assertEquals("Decommissioned Nodes",decommissionedBefore,metrics.getNumDecommisionedNMs());
  Assert.assertEquals("Rebooted Nodes",rebootedBefore,metrics.getNumRebootedNMs());
  Assert.assertEquals(NodeState.RUNNING,runningNode.getState());
  Assert.assertNotNull(nodesListManagerEvent);
  Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,nodesListManagerEvent.getType());
}
InternalCallVerifier EqualityVerifier
// Verifies that a status report for a container already queued for cleanup
// still results in a scheduler node-update.
@Test(timeout=5000) public void testExpiredContainer(){
node.handle(new RMNodeStartedEvent(null,null,null));
verify(scheduler).handle(any(NodeAddedSchedulerEvent.class));
ContainerId completedContainerId=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0);
// Queue the container for cleanup on this node.
node.handle(new RMNodeCleanContainerEvent(null,completedContainerId));
Assert.assertEquals(1,node.getContainersToCleanUp().size());
// Report the same container back via a node status heartbeat.
RMNodeStatusEvent statusEvent=getMockRMNodeStatusEvent();
ContainerStatus containerStatus=mock(ContainerStatus.class);
doReturn(completedContainerId).when(containerStatus).getContainerId();
doReturn(Collections.singletonList(containerStatus)).when(statusEvent).getContainers();
node.handle(statusEvent);
// After the heartbeat, a total of two node-updates reached the scheduler.
verify(scheduler,times(2)).handle(any(NodeUpdateSchedulerEvent.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Restart the RM while an app's AM has failed and verify the attempt's
// FAILED state was persisted in the state store and is recovered by the
// second RM.
@Test(timeout=60000) public void testRMRestartAppRunningAMFailed() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
// In-memory store lets the test inspect exactly what the RM persisted.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// Launch an app, then fail its AM by reporting the AM container COMPLETE.
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am0.waitForState(RMAppAttemptState.FAILED);
// The FAILED attempt must be in the store, while the application itself
// has no final state recorded yet.
ApplicationState appState=rmAppState.get(app0.getApplicationId());
Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState());
Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState());
rm1.waitForState(app0.getApplicationId(),RMAppState.ACCEPTED);
// A second RM recovering from the same store must see the FAILED attempt.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED);
rm1.stop();
rm2.stop();
}
InternalCallVerifier BooleanVerifier
// Kill an application whose attempt state was never persisted (the store
// below drops attempt writes) and verify that after restart the recovered
// app is KILLED with zero attempts.
@Test(timeout=60000) public void testRMRestartKilledAppWithNoAttempts() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore(){
// Intentionally drop attempt persistence so recovery sees no attempts.
@Override public synchronized void storeApplicationAttemptStateInternal( ApplicationAttemptId attemptId, ApplicationAttemptStateData attemptStateData) throws Exception {
}
@Override public synchronized void updateApplicationAttemptStateInternal( ApplicationAttemptId attemptId, ApplicationAttemptStateData attemptStateData) throws Exception {
}
}
;
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
// Submit and immediately kill the application.
RMApp app0=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false);
rm1.killApp(app0.getApplicationId());
rm1.waitForState(app0.getApplicationId(),RMAppState.KILLED);
// Restart from the same store and verify the recovered state.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(loadedApp0.getApplicationId(),RMAppState.KILLED);
Assert.assertTrue(loadedApp0.getAppAttempts().size() == 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// End-to-end RM restart: apps in finished, running, and pending states plus
// an unmanaged-AM app are created on rm1; rm2 then recovers all four from
// the shared MemoryRMStateStore, the old AM/NMs are told to resync, and the
// recovered apps are driven to completion on rm2.
@SuppressWarnings("rawtypes") @Test(timeout=180000) public void testRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
// State store shared between rm1 and rm2; rmAppState is a live view of it.
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
MockNM nm2=new MockNM("127.0.0.2:5678",15120,rm1.getResourceTrackerService());
nm1.registerNode();
nm2.registerNode();
// App 1 (app0): launch its AM and finish it before the restart.
RMApp app0=rm1.submitApp(200);
RMAppAttempt attempt0=app0.getCurrentAppAttempt();
Assert.assertEquals(1,rmAppState.size());
nm1.nodeHeartbeat(true);
MockAM am0=rm1.sendAMLaunched(attempt0.getAppAttemptId());
am0.registerAppAttempt();
finishApplicationMaster(app0,rm1,nm1,am0);
// App 2 (app1): leave it running with a registered AM and a container.
RMApp app1=rm1.submitApp(200);
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId());
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt1.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
// The attempt and its master container were persisted with the app state.
Assert.assertEquals(1,appState.getAttemptCount());
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
MockAM am1=rm1.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
am1.allocate("127.0.0.1",1000,1,new ArrayList());
nm1.nodeHeartbeat(true);
List conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
// Heartbeat until the scheduler hands out the requested container.
while (conts.size() == 0) {
nm1.nodeHeartbeat(true);
conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(500);
}
// App 3 (app2): submitted but its AM is never launched (still pending).
RMApp app2=rm1.submitApp(200);
appState=rmAppState.get(app2.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app2.getApplicationSubmissionContext().getApplicationId());
// App 4: unmanaged AM; its state must also be saved and recovered.
RMApp appUnmanaged=rm1.submitApp(200,"someApp","someUser",null,true,null,conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null);
ApplicationAttemptId unmanagedAttemptId=appUnmanaged.getCurrentAppAttempt().getAppAttemptId();
ApplicationId unmanagedAppId=appUnmanaged.getApplicationId();
appState=rmAppState.get(unmanagedAppId);
Assert.assertNotNull(appState);
rm1.waitForState(unmanagedAttemptId,RMAppAttemptState.LAUNCHED);
rm1.waitForState(unmanagedAppId,RMAppState.ACCEPTED);
Assert.assertEquals(1,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),appUnmanaged.getApplicationSubmissionContext().getApplicationId());
// Second RM instance recovers all four apps from the same store.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm2.setResourceTrackerService(rm2.getResourceTrackerService());
Assert.assertEquals(4,rm2.getRMContext().getRMApps().size());
rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FINISHED);
RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
Assert.assertNotNull(loadedApp1);
Assert.assertEquals(1,loadedApp1.getAppAttempts().size());
Assert.assertEquals(app1.getApplicationSubmissionContext().getApplicationId(),loadedApp1.getApplicationSubmissionContext().getApplicationId());
RMApp loadedApp2=rm2.getRMContext().getRMApps().get(app2.getApplicationId());
Assert.assertNotNull(loadedApp2);
Assert.assertEquals(app2.getApplicationSubmissionContext().getApplicationId(),loadedApp2.getApplicationSubmissionContext().getApplicationId());
rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED);
rm2.waitForState(loadedApp2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(1,loadedApp1.getAppAttempts().size());
Assert.assertEquals(1,loadedApp2.getAppAttempts().size());
// The stale AM must be told to shut down, and the old NMs to resync.
am1.setAMRMProtocol(rm2.getApplicationMasterService(),rm2.getRMContext());
AllocateResponse allocResponse=am1.allocate(new ArrayList(),new ArrayList());
Assert.assertEquals(AMCommand.AM_SHUTDOWN,allocResponse.getAMCommand());
NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
hbResponse=nm2.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction());
// Re-register both NMs against rm2; nm1 reports the old AM container done.
nm1=new MockNM("127.0.0.1:1234",15120,rm2.getResourceTrackerService());
nm2=new MockNM("127.0.0.2:5678",15120,rm2.getResourceTrackerService());
NMContainerStatus status=TestRMRestart.createNMContainerStatus(loadedApp1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status),null);
nm2.registerNode();
rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED);
// Poll (40 x 200ms, up to ~8s) for loadedApp1's second attempt to appear.
int timeoutSecs=0;
while (loadedApp1.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
;
Thread.sleep(200);
}
// After re-registration the NMs should no longer be asked to resync.
hbResponse=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction());
hbResponse=nm2.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction());
// New attempt for app1: verify its master container was persisted, and
// determine which NM hosts it so the right NM finishes the app later.
attempt1=loadedApp1.getCurrentAppAttempt();
attemptId1=attempt1.getAppAttemptId();
rm2.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
appState=rmAppState.get(loadedApp1.getApplicationId());
attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
MockNM am1Node=nm1;
if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) {
am1Node=nm2;
}
// Same for app2's attempt.
RMAppAttempt attempt2=loadedApp2.getCurrentAppAttempt();
ApplicationAttemptId attemptId2=attempt2.getAppAttemptId();
rm2.waitForState(attemptId2,RMAppAttemptState.ALLOCATED);
appState=rmAppState.get(loadedApp2.getApplicationId());
attemptState=appState.getAttempt(attemptId2);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId2,1),attemptState.getMasterContainer().getId());
MockNM am2Node=nm1;
if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) {
am2Node=nm2;
}
// Launch and register both new AMs, allocate containers, then finish both
// recovered apps on rm2.
am1=rm2.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
MockAM am2=rm2.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
am1.allocate("127.0.0.1",1000,3,new ArrayList());
am2.allocate("127.0.0.2",1000,1,new ArrayList());
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (conts.size() == 0) {
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(500);
}
finishApplicationMaster(loadedApp1,rm2,am1Node,am1);
finishApplicationMaster(loadedApp2,rm2,am2Node,am2);
rm2.stop();
rm1.stop();
// All four apps are still present in the store after both RMs stop.
Assert.assertEquals(4,rmAppState.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// An app whose single allowed AM attempt fails must be recovered as FAILED
// after RM restart, with its diagnostics preserved in the app report.
@Test(timeout=60000) public void testRMRestartFailedApp() throws Exception {
// Only one AM attempt: the first failure is terminal for the app.
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
// Report the AM container COMPLETE so the attempt (and hence the app) fails.
nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am0.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app0.getApplicationId(),RMAppState.FAILED);
// Both the app's and the attempt's FAILED states were persisted.
ApplicationState appState=rmAppState.get(app0.getApplicationId());
Assert.assertEquals(RMAppState.FAILED,appState.getState());
Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState());
// Restart: the recovered app and attempt must again be FAILED.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(app0.getApplicationId(),RMAppState.FAILED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED);
Assert.assertEquals(1,loadedApp0.getAppAttempts().size());
verifyAppReportAfterRMRestart(app0,rm2);
// NOTE(review): this checks diagnostics on the pre-restart app0 object
// rather than on loadedApp0 — confirm that is intentional.
Assert.assertTrue(app0.getDiagnostics().toString().contains("Failing the application."));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Stopping the RM must drain the state-store dispatcher queue: store events
// queued behind a stalled store are still processed before stop completes,
// so every submitted app ends up persisted.
@Test(timeout=60000) public void testRMStateStoreDispatcherDrainedOnRMStop() throws Exception {
// Store that blocks every store event until serviceStop() flips the flag,
// so submitted apps pile up in the dispatcher queue.
MemoryRMStateStore memStore=new MemoryRMStateStore(){
volatile boolean wait=true;
@Override public void serviceStop() throws Exception {
wait=false;
super.serviceStop();
}
@Override protected void handleStoreEvent( RMStateStoreEvent event){
// busy-wait until stop begins; volatile flag makes the write visible
while (wait) ;
super.handleStoreEvent(event);
}
}
;
memStore.init(conf);
final MockRM rm1=new MockRM(conf,memStore);
rm1.start();
final ArrayList appList=new ArrayList();
final int NUM_APPS=5;
// Submit NUM_APPS apps; each reaches NEW_SAVING (save event queued).
for (int i=0; i < NUM_APPS; i++) {
RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false);
appList.add(app);
rm1.waitForState(app.getApplicationId(),RMAppState.NEW_SAVING);
}
// Nothing is persisted while the store is stalled.
Map rmAppState=memStore.getState().getApplicationState();
Assert.assertEquals(0,rmAppState.size());
rm1.stop();
// After stop the dispatcher must have drained: every app was persisted
// with zero attempts and a matching submission context.
for ( RMApp app : appList) {
ApplicationState appState=rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app.getApplicationSubmissionContext().getApplicationId());
}
Assert.assertEquals(NUM_APPS,rmAppState.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A successfully finished app's final status, diagnostics, tracking URL and
// finish time must be persisted and visible in the app report after restart.
@Test(timeout=60000) public void testRMRestartSucceededApp() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
// Unregister the AM with an explicit final status, diagnostics and URL.
FinishApplicationMasterRequest req=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.SUCCEEDED,"diagnostics","trackingUrl");
finishApplicationMaster(app0,rm1,nm1,am0,req);
// The attempt's final data was saved to the store.
ApplicationState appState=rmAppState.get(app0.getApplicationId());
ApplicationAttemptState attemptState0=appState.getAttempt(am0.getApplicationAttemptId());
Assert.assertEquals("diagnostics",attemptState0.getDiagnostics());
Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,attemptState0.getFinalApplicationStatus());
Assert.assertEquals("trackingUrl",attemptState0.getFinalTrackingUrl());
Assert.assertEquals(app0.getFinishTime(),appState.getFinishTime());
// Restart: the recovered app report carries the same final status and URL.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
ApplicationReport appReport=verifyAppReportAfterRMRestart(app0,rm2);
Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,appReport.getFinalApplicationStatus());
Assert.assertEquals("trackingUrl",appReport.getOriginalTrackingUrl());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test(timeout=60000) public void testDelegationTokenRestoredInDelegationTokenRenewer() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new TestSecurityMockRM(conf,memStore);
rm1.start();
HashSet> tokenSet=new HashSet>();
Credentials ts=new Credentials();
Text userText1=new Text("user1");
RMDelegationTokenIdentifier dtId1=new RMDelegationTokenIdentifier(userText1,new Text("renewer1"),userText1);
Token token1=new Token(dtId1,rm1.getRMContext().getRMDelegationTokenSecretManager());
SecurityUtil.setTokenService(token1,rmAddr);
ts.addToken(userText1,token1);
tokenSet.add(token1);
Text userText2=new Text("user2");
RMDelegationTokenIdentifier dtId2=new RMDelegationTokenIdentifier(userText2,new Text("renewer2"),userText2);
Token token2=new Token(dtId2,rm1.getRMContext().getRMDelegationTokenSecretManager());
SecurityUtil.setTokenService(token2,rmAddr);
ts.addToken(userText2,token2);
tokenSet.add(token2);
RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts);
ApplicationState appState=rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(tokenSet,rm1.getRMContext().getDelegationTokenRenewer().getDelegationTokens());
DataOutputBuffer dob=new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
securityTokens.rewind();
Assert.assertEquals(securityTokens,appState.getApplicationSubmissionContext().getAMContainerSpec().getTokens());
MockRM rm2=new TestSecurityMockRM(conf,memStore);
rm2.start();
waitForTokensToBeRenewed(rm2);
Assert.assertEquals(tokenSet,rm2.getRMContext().getDelegationTokenRenewer().getDelegationTokens());
rm1.stop();
rm2.stop();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With RM_MAX_COMPLETED_APPLICATIONS=1, completing a second app after a
// restart must evict the first finished app from both the RM context and
// the state store.
@Test(timeout=60000) public void testFinishedAppRemovalAfterRMRestart() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
// Keep at most one completed application around.
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,1);
memStore.init(conf);
RMState rmState=memStore.getState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// Finish app0 on rm1; its FINISHED state is persisted.
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
finishApplicationMaster(app0,rm1,nm1,am0);
// Restart and re-register the NM against rm2.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm1=rm2.registerNode("127.0.0.1:1234",15120);
Map rmAppState=rmState.getApplicationState();
Assert.assertEquals(RMAppState.FINISHED,rmAppState.get(app0.getApplicationId()).getState());
rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
// Completing app1 pushes the completed-app count past the limit of 1 ...
RMApp app1=rm2.submitApp(200);
MockAM am1=launchAM(app1,rm2,nm1);
finishApplicationMaster(app1,rm2,nm1,am1);
// ... so app0 is removed from the RM context and from the state store.
Assert.assertNull(rm2.getRMContext().getRMApps().get(app0.getApplicationId()));
Assert.assertNull(rmAppState.get(app0.getApplicationId()));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// After restart, getApplications() must report recovered apps with their
// correct final YarnApplicationState (FINISHED / FAILED / KILLED), support
// filtering by application type, and log one app summary per recovered app.
@Test(timeout=60000) public void testRMRestartGetApplicationList() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// app0 finishes normally.
RMApp app0=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am0=launchAM(app0,rm1,nm1);
finishApplicationMaster(app0,rm1,nm1,am0);
// app1 fails: its AM container is reported COMPLETE and max attempts is 1.
RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am1=launchAM(app1,rm1,nm1);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED);
// app2 is killed while its AM is running.
RMApp app2=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am2=launchAM(app2,rm1,nm1);
rm1.killApp(app2.getApplicationId());
rm1.waitForState(app2.getApplicationId(),RMAppState.KILLED);
rm1.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.KILLED);
// rm2 recovers the three apps; spy the app manager to count summary logs.
MockRM rm2=new MockRM(conf,memStore){
@Override protected RMAppManager createRMAppManager(){
return spy(super.createRMAppManager());
}
}
;
rm2.start();
// Query by final states; each app must report its recovered state.
GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED));
GetApplicationsResponse response1=rm2.getClientRMService().getApplications(request1);
List appList1=response1.getApplicationList();
boolean forApp0=false, forApp1=false, forApp2=false;
for ( ApplicationReport report : appList1) {
if (report.getApplicationId().equals(app0.getApplicationId())) {
Assert.assertEquals(YarnApplicationState.FINISHED,report.getYarnApplicationState());
forApp0=true;
}
if (report.getApplicationId().equals(app1.getApplicationId())) {
Assert.assertEquals(YarnApplicationState.FAILED,report.getYarnApplicationState());
forApp1=true;
}
if (report.getApplicationId().equals(app2.getApplicationId())) {
Assert.assertEquals(YarnApplicationState.KILLED,report.getYarnApplicationState());
forApp2=true;
}
}
Assert.assertTrue(forApp0 && forApp1 && forApp2);
// Query by application type: all three apps were submitted as "myType".
Set appTypes=new HashSet();
appTypes.add("myType");
GetApplicationsRequest request2=GetApplicationsRequest.newInstance(appTypes);
GetApplicationsResponse response2=rm2.getClientRMService().getApplications(request2);
List appList2=response2.getApplicationList();
// assertEquals reports the actual size on failure, unlike assertTrue(3 == n).
Assert.assertEquals(3,appList2.size());
// One application summary is logged per recovered app.
verify(rm2.getRMAppManager(),times(3)).logApplicationSummary(isA(ApplicationId.class));
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Per-app vs default max-attempt handling across restart: app1 (max 1
// attempt, already consumed) must fail on recovery, while app2 (-1, i.e.
// defer to the RM default) is recovered as ACCEPTED and reports the default
// max attempts.
@Test(timeout=60000) public void testRMRestartOnMaxAppAttempts() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
// app1 requests 1 max attempt; app2 requests -1 (use the RM default).
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,null);
RMApp app2=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null);
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
Assert.assertEquals(0,appState.getAttemptCount());
Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId());
// Drive app1's first (and only permitted) attempt to ALLOCATED so the
// attempt is persisted with its master container.
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
Assert.assertEquals(1,appState.getAttemptCount());
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,3000);
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
// app2 (submitted with -1) reports max attempts of 2 after recovery.
Assert.assertEquals(2,rm2.getRMContext().getRMApps().get(app2.getApplicationId()).getMaxAppAttempts());
Assert.assertEquals(2,rm2.getRMContext().getRMApps().size());
// app1 exhausted its single attempt -> FAILED; app2 is still recoverable.
rm2.waitForState(app1.getApplicationId(),RMAppState.FAILED);
rm2.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.FAILED,rmAppState.get(app1.getApplicationId()).getState());
// app2 never reached a final state, so none is stored for it.
Assert.assertNull(rmAppState.get(app2.getApplicationId()).getState());
rm1.stop();
rm2.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Restart while a previous AM attempt is still running: each new RM
// generation (rm2..rm4) must recover the previous attempts, wait for / fail
// the still-running one, and only then proceed with new attempts.  Cycles
// the same app through four RM instances against one shared state store.
@Test(timeout=60000) public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
// Local copy of conf so the high attempt limit doesn't leak to other tests.
YarnConfiguration conf=new YarnConfiguration(this.conf);
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,40);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
final MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",16382,rm1.getResourceTrackerService());
nm1.registerNode();
// Fail the first AM attempt, then launch a second that stays RUNNING.
RMApp app1=rm1.submitApp(200);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am1=launchAM(app1,rm1,nm1);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
MockAM am2=launchAM(app1,rm1,nm1);
Assert.assertEquals(1,rmAppState.size());
// Expected value goes first (these two asserts had the arguments reversed,
// which garbles the failure message).
Assert.assertEquals(RMAppState.RUNNING,app1.getState());
Assert.assertEquals(RMAppAttemptState.RUNNING,app1.getAppAttempts().get(app1.getCurrentAppAttempt().getAppAttemptId()).getAppAttemptState());
// rm2 recovers the app: am1 is restored FAILED, am2 (still running at the
// time of the restart) is restored LAUNCHED.
MockRM rm2=null;
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
NodeHeartbeatResponse res=nm1.nodeHeartbeat(true);
Assert.assertEquals(NodeAction.RESYNC,res.getNodeAction());
RMApp rmApp=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
Assert.assertEquals(2,rmApp.getAppAttempts().size());
rm2.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
rm2.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.LAUNCHED);
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am1.getApplicationAttemptId()).getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts().get(am2.getApplicationAttemptId()).getAppAttemptState());
// The NM reports am2's container complete -> the attempt fails and a third
// attempt is launched and driven to RUNNING.
NMContainerStatus status=TestRMRestart.createNMContainerStatus(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(status),null);
rm2.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.FAILED);
launchAM(rmApp,rm2,nm1);
Assert.assertEquals(3,rmApp.getAppAttempts().size());
rm2.waitForState(rmApp.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.RUNNING);
// rm3: recover again with a shorter AM expiry interval; the third attempt
// eventually fails (presumably via AM expiry — no NM report this time) and
// a fourth attempt is created.
conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,10000);
MockRM rm3=null;
rm3=new MockRM(conf,memStore);
rm3.start();
nm1.setResourceTrackerService(rm3.getResourceTrackerService());
rmApp=rm3.getRMContext().getRMApps().get(app1.getApplicationId());
rm3.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// Expected first (this assert also had reversed arguments).
Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
Assert.assertEquals(3,rmApp.getAppAttempts().size());
rm3.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
rm3.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.FAILED);
ApplicationAttemptId latestAppAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
rm3.waitForState(latestAppAttemptId,RMAppAttemptState.LAUNCHED);
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am1.getApplicationAttemptId()).getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am2.getApplicationAttemptId()).getAppAttemptState());
Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
rm3.waitForState(latestAppAttemptId,RMAppAttemptState.FAILED);
rm3.waitForState(rmApp.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(4,rmApp.getAppAttempts().size());
Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
latestAppAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
// A second app submitted on rm3; no attempt state is persisted for it yet.
RMApp app2=rm3.submitApp(200);
rm3.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(1,app2.getAppAttempts().size());
Assert.assertEquals(0,memStore.getState().getApplicationState().get(app2.getApplicationId()).getAttemptCount());
// rm4: after recovery both apps stay ACCEPTED and their latest attempts
// are rescheduled.
MockRM rm4=null;
rm4=new MockRM(conf,memStore);
rm4.start();
rmApp=rm4.getRMContext().getRMApps().get(app1.getApplicationId());
rm4.waitForState(rmApp.getApplicationId(),RMAppState.ACCEPTED);
// Poll (40 x 200ms, up to ~8s) for attempt recovery to settle.
int timeoutSecs=0;
while (rmApp.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
Thread.sleep(200);
}
Assert.assertEquals(4,rmApp.getAppAttempts().size());
Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
rm4.waitForState(latestAppAttemptId,RMAppAttemptState.SCHEDULED);
Assert.assertEquals(RMAppAttemptState.SCHEDULED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
app2=rm4.getRMContext().getRMApps().get(app2.getApplicationId());
rm4.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
Assert.assertEquals(RMAppState.ACCEPTED,app2.getState());
Assert.assertEquals(1,app2.getAppAttempts().size());
rm4.waitForState(app2.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.SCHEDULED);
Assert.assertEquals(RMAppAttemptState.SCHEDULED,app2.getCurrentAppAttempt().getAppAttemptState());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Security material tied to an AM attempt (client-token master key and AMRM
// token) must be persisted with the attempt and restored into the new RM's
// secret managers on restart.
@Test(timeout=60000) public void testAppAttemptTokensRestoredOnRMRestart() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
// Kerberos auth enables the client-token / AMRM-token machinery under test.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new TestSecurityMockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("0.0.0.0:4321",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),"default");
ApplicationState appState=rmAppState.get(app1.getApplicationId());
Assert.assertNotNull(appState);
// Drive the first attempt to ALLOCATED so its credentials get saved.
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
ApplicationAttemptId attemptId1=attempt1.getAppAttemptId();
rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
Assert.assertNotNull(attemptState);
Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
// The client-token master key must be part of the saved attempt credentials.
byte[] clientTokenMasterKey=attempt1.getClientTokenMasterKey().getEncoded();
Credentials savedCredentials=attemptState.getAppAttemptCredentials();
Assert.assertArrayEquals("client token master key not saved",clientTokenMasterKey,savedCredentials.getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME));
// Restart: the master key and the AMRM token must be restored and usable
// by rm2's secret managers.
MockRM rm2=new TestSecurityMockRM(conf,memStore);
rm2.start();
RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
RMAppAttempt loadedAttempt1=loadedApp1.getRMAppAttempt(attemptId1);
Assert.assertNotNull(loadedAttempt1);
Assert.assertEquals("client token master key not restored",attempt1.getClientTokenMasterKey(),loadedAttempt1.getClientTokenMasterKey());
Assert.assertArrayEquals(clientTokenMasterKey,rm2.getClientToAMTokenSecretManager().getMasterKey(attemptId1).getEncoded());
Token amrmToken=loadedAttempt1.getAMRMToken();
Assert.assertArrayEquals(amrmToken.getPassword(),rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword(amrmToken.decodeIdentifier()));
rm1.stop();
rm2.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
// An RM whose state store fails version checking must abort startup with the
// store's error and end up fully STOPPED rather than half-started.
@Test(timeout=10000) public void testRMShutdown() throws Exception {
// Store whose version check always fails, forcing startup to abort.
MemoryRMStateStore memStore=new MemoryRMStateStore(){
@Override public synchronized void checkVersion() throws Exception {
throw new Exception("Invalid version.");
}
}
;
memStore.init(conf);
MockRM rm1=null;
try {
rm1=new MockRM(conf,memStore);
rm1.start();
Assert.fail();
}
catch ( Exception e) {
Assert.assertTrue(e.getMessage().contains("Invalid version."));
}
// Guard: if construction itself threw, rm1 is still null and the state
// check below would NPE, masking the real failure.
Assert.assertNotNull(rm1);
// assertEquals reports the actual state on failure, unlike assertTrue(==).
Assert.assertEquals(STATE.STOPPED,rm1.getServiceState());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Kill a running application and keep re-issuing the kill request until the
// RM reports it complete: the client is expected to need at least one retry,
// and the state store must see exactly one attempt update and two app updates.
@Test(timeout=60000) public void testClientRetryOnKillingApplication() throws Exception {
MemoryRMStateStore memStore=new TestMemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
MockAM am1=launchAM(app1,rm1,nm1);
// Re-issue the kill until it is reported complete, counting the retries.
int retries=0;
KillApplicationResponse response=rm1.killApp(app1.getApplicationId());
while (!response.getIsKillCompleted()) {
Thread.sleep(100);
retries++;
response=rm1.killApp(app1.getApplicationId());
}
Assert.assertTrue(retries >= 1);
rm1.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.KILLED);
rm1.waitForState(app1.getApplicationId(),RMAppState.KILLED);
// The instrumented store counted one attempt update and two app updates.
Assert.assertEquals(1,((TestMemoryRMStateStore)memStore).updateAttempt);
Assert.assertEquals(2,((TestMemoryRMStateStore)memStore).updateApp);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A killed app (and its killed attempt) must be recovered as KILLED after
// restart, with its diagnostics preserved in the app report.
@Test(timeout=60000) public void testRMRestartKilledApp() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app0=rm1.submitApp(200);
MockAM am0=launchAM(app0,rm1,nm1);
// Kill while the AM is running; both app and attempt end up KILLED.
rm1.killApp(app0.getApplicationId());
rm1.waitForState(app0.getApplicationId(),RMAppState.KILLED);
rm1.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.KILLED);
// KILLED final states were persisted for both app and attempt.
ApplicationState appState=rmAppState.get(app0.getApplicationId());
Assert.assertEquals(RMAppState.KILLED,appState.getState());
Assert.assertEquals(RMAppAttemptState.KILLED,appState.getAttempt(am0.getApplicationAttemptId()).getState());
// Restart and verify recovery of the KILLED app, its single attempt, and
// the diagnostics in the app report.
MockRM rm2=new MockRM(conf,memStore);
rm2.start();
RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
rm2.waitForState(app0.getApplicationId(),RMAppState.KILLED);
rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.KILLED);
Assert.assertEquals(1,loadedApp0.getAppAttempts().size());
ApplicationReport appReport=verifyAppReportAfterRMRestart(app0,rm2);
Assert.assertEquals(app0.getDiagnostics().toString(),appReport.getDiagnostics());
rm1.stop();
rm2.stop();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// If the RM dies after an attempt unregisters (SUCCEEDED) but before the
// app's final state reaches the store, the restarted RM must complete the
// recovery: the attempt is recovered FINISHED and the app is driven to
// FINISHED and persisted this time.
@Test(timeout=60000) public void testRMRestartWaitForPreviousSucceededAttempt() throws Exception {
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
// Store that drops the FIRST app-state update (simulating a crash before
// the final state was saved) and behaves normally afterwards.
MemoryRMStateStore memStore=new MemoryRMStateStore(){
int count=0;
@Override public void updateApplicationStateInternal( ApplicationId appId, ApplicationStateData appStateData) throws Exception {
if (count == 0) {
// Skip this one update: the app's final state is deliberately not saved.
LOG.info(appId + " final state is not saved.");
count++;
}
else {
super.updateApplicationStateInternal(appId,appStateData);
}
}
}
;
memStore.init(conf);
RMState rmState=memStore.getState();
Map rmAppState=rmState.getApplicationState();
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=rm1.registerNode("127.0.0.1:1234",15120);
RMApp app0=rm1.submitApp(200);
MockAM am0=MockRM.launchAndRegisterAM(app0,rm1,nm1);
// The AM unregisters successfully, but the final app state is not
// persisted (the first update was dropped above).
FinishApplicationMasterRequest req=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.SUCCEEDED,"","");
am0.unregisterAppAttempt(req,true);
am0.waitForState(RMAppAttemptState.FINISHING);
Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState());
// Restart: rm2 must finish the transition to FINISHED and persist it.
MockRM rm2=new MockRM(conf,memStore);
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
rm2.start();
rm2.waitForState(app0.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.FINISHED);
rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
Assert.assertEquals(RMAppState.FINISHED,rmAppState.get(app0.getApplicationId()).getState());
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that RM delegation tokens and master keys persisted by RM1 are
 * restored when RM2 starts from the same state store, that cancelled tokens
 * do not reappear after restart, and that renew/cancel operations on the
 * restarted RM keep the state store in sync.
 */
@Test(timeout = 60000)
public void testRMDelegationTokenRestoredOnRMRestart() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
  UserGroupInformation.setConfiguration(conf);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();
  // Typed views of the store (the raw declarations did not compile for the
  // lookups below).
  Map<ApplicationId, ApplicationState> rmAppState = rmState.getApplicationState();
  Map<RMDelegationTokenIdentifier, Long> rmDTState =
      rmState.getRMDTSecretManagerState().getTokenState();
  Set<DelegationKey> rmDTMasterKeyState =
      rmState.getRMDTSecretManagerState().getMasterKeyState();
  MockRM rm1 = new TestSecurityMockRM(conf, memStore);
  rm1.start();
  // Request a delegation token and submit an application carrying it.
  Credentials ts = new Credentials();
  GetDelegationTokenRequest request1 = GetDelegationTokenRequest.newInstance("renewer1");
  UserGroupInformation.getCurrentUser().setAuthenticationMethod(AuthMethod.KERBEROS);
  GetDelegationTokenResponse response1 =
      rm1.getClientRMService().getDelegationToken(request1);
  org.apache.hadoop.yarn.api.records.Token delegationToken1 =
      response1.getRMDelegationToken();
  Token<RMDelegationTokenIdentifier> token1 =
      ConverterUtils.convertFromYarn(delegationToken1, rmAddr);
  RMDelegationTokenIdentifier dtId1 = token1.decodeIdentifier();
  HashSet<RMDelegationTokenIdentifier> tokenIdentSet =
      new HashSet<RMDelegationTokenIdentifier>();
  ts.addToken(token1.getService(), token1);
  tokenIdentSet.add(dtId1);
  RMApp app = rm1.submitApp(200, "name", "user",
      new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);
  ApplicationState appState = rmAppState.get(app.getApplicationId());
  Assert.assertNotNull(appState);
  // Master keys, tokens, and the sequence number must all be in the store.
  Set<DelegationKey> allKeysRM1 =
      rm1.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys();
  Assert.assertEquals(allKeysRM1, rmDTMasterKeyState);
  Map<RMDelegationTokenIdentifier, Long> allTokensRM1 =
      rm1.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
  Assert.assertEquals(tokenIdentSet, allTokensRM1.keySet());
  Assert.assertEquals(allTokensRM1, rmDTState);
  Assert.assertEquals(
      rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),
      rmState.getRMDTSecretManagerState().getDTSequenceNumber());
  // Issue and immediately cancel a second token; it must leave the store.
  GetDelegationTokenRequest request2 = GetDelegationTokenRequest.newInstance("renewer2");
  GetDelegationTokenResponse response2 =
      rm1.getClientRMService().getDelegationToken(request2);
  org.apache.hadoop.yarn.api.records.Token delegationToken2 =
      response2.getRMDelegationToken();
  Token<RMDelegationTokenIdentifier> token2 =
      ConverterUtils.convertFromYarn(delegationToken2, rmAddr);
  RMDelegationTokenIdentifier dtId2 = token2.decodeIdentifier();
  try {
    rm1.getRMContext().getRMDelegationTokenSecretManager()
        .cancelToken(token2, UserGroupInformation.getCurrentUser().getUserName());
  } catch (Exception e) {
    Assert.fail();
  }
  Assert.assertEquals(
      rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),
      dtId2.getSequenceNumber());
  Assert.assertFalse(rmDTState.containsKey(dtId2));
  // Restart: RM2 must come up with exactly the surviving tokens and keys.
  MockRM rm2 = new TestSecurityMockRM(conf, memStore);
  rm2.start();
  Map<RMDelegationTokenIdentifier, Long> allTokensRM2 =
      rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
  Assert.assertEquals(allTokensRM2.keySet(), allTokensRM1.keySet());
  Assert.assertTrue(rm2.getRMContext().getRMDelegationTokenSecretManager()
      .getAllMasterKeys().containsAll(allKeysRM1));
  Assert.assertEquals(
      rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),
      rm2.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber());
  // Renewing on RM2 must advance the renew date both in memory and in the store.
  Long renewDateBeforeRenew = allTokensRM2.get(dtId1);
  try {
    // Sleep so the new renew date is strictly greater than the old one.
    Thread.sleep(1);
    rm2.getRMContext().getRMDelegationTokenSecretManager().renewToken(token1, "renewer1");
  } catch (Exception e) {
    Assert.fail();
  }
  allTokensRM2 = rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
  Long renewDateAfterRenew = allTokensRM2.get(dtId1);
  Assert.assertTrue(renewDateAfterRenew > renewDateBeforeRenew);
  Assert.assertTrue(rmDTState.containsValue(renewDateAfterRenew));
  Assert.assertFalse(rmDTState.containsValue(renewDateBeforeRenew));
  // Cancelling on RM2 must remove the token from memory and from the store.
  try {
    rm2.getRMContext().getRMDelegationTokenSecretManager()
        .cancelToken(token1, UserGroupInformation.getCurrentUser().getUserName());
  } catch (Exception e) {
    Assert.fail();
  }
  allTokensRM2 = rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
  Assert.assertFalse(allTokensRM2.containsKey(dtId1));
  Assert.assertFalse(rmDTState.containsKey(dtId1));
  rm1.stop();
  rm2.stop();
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that the RM rewrites the configured http filter initializers so
 * that RMAuthenticationFilterInitializer is always present, for a variety of
 * user-provided values: kerberos-based initializers first, then simple ones.
 */
@Test(timeout = 50000)
public void testFilterOverrides() throws Exception {
  String filterInitializerConfKey = "hadoop.http.filter.initializers";
  String[] filterInitializers = {
      AuthenticationFilterInitializer.class.getName(),
      RMAuthenticationFilterInitializer.class.getName(),
      AuthenticationFilterInitializer.class.getName() + ","
          + RMAuthenticationFilterInitializer.class.getName(),
      AuthenticationFilterInitializer.class.getName() + ", "
          + RMAuthenticationFilterInitializer.class.getName(),
      AuthenticationFilterInitializer.class.getName() + ", "
          + this.getClass().getName()};
  for (String filterInitializer : filterInitializers) {
    resourceManager = new ResourceManager();
    Configuration conf = new YarnConfiguration();
    conf.set(filterInitializerConfKey, filterInitializer);
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.http.authentication.type", "kerberos");
    try {
      try {
        UserGroupInformation.setConfiguration(conf);
      } catch (Exception e) {
        // Expected when no real kerberos environment is available.
        LOG.info("Got expected exception");
      }
      resourceManager.init(conf);
      resourceManager.startWepApp();
    } catch (RuntimeException e) {
      // The web app is expected to fail to start; verify the rewritten
      // filter configuration.
      String tmp = resourceManager.getConfig().get(filterInitializerConfKey);
      if (filterInitializer.contains(this.getClass().getName())) {
        Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName()
            + "," + this.getClass().getName(), tmp);
      } else {
        Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName(), tmp);
      }
    } finally {
      // Always stop the RM; previously it was only stopped on the exception
      // path and leaked when startWepApp succeeded.
      resourceManager.stop();
    }
  }
  // Simple (non-kerberos) filter configs go through the same rewrite.
  String[] simpleFilterInitializers = {"", StaticUserWebFilter.class.getName()};
  for (String filterInitializer : simpleFilterInitializers) {
    resourceManager = new ResourceManager();
    Configuration conf = new YarnConfiguration();
    conf.set(filterInitializerConfKey, filterInitializer);
    try {
      UserGroupInformation.setConfiguration(conf);
      resourceManager.init(conf);
      resourceManager.startWepApp();
    } catch (RuntimeException e) {
      String tmp = resourceManager.getConfig().get(filterInitializerConfKey);
      if (filterInitializer.equals(StaticUserWebFilter.class.getName())) {
        Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName()
            + "," + StaticUserWebFilter.class.getName(), tmp);
      } else {
        Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName(), tmp);
      }
    } finally {
      resourceManager.stop();
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * A node that is absent from the include list must be told to SHUTDOWN at
 * registration time, with an explanatory diagnostics message.
 */
@Test
public void testNodeRegistrationFailure() throws Exception {
  writeToHostsFile("host1");
  Configuration yarnConf = new Configuration();
  yarnConf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
  rm = new MockRM(yarnConf);
  rm.start();
  ResourceTrackerService tracker = rm.getResourceTrackerService();
  // Attempt to register a host that is not in the include file.
  RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class);
  request.setNodeId(NodeId.newInstance("host2", 1234));
  request.setHttpPort(1234);
  RegisterNodeManagerResponse response = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.SHUTDOWN, response.getNodeAction());
  Assert.assertEquals(
      "Disallowed NodeManager from host2, Sending SHUTDOWN signal to the NodeManager.",
      response.getDiagnosticsMessage());
}
InternalCallVerifier EqualityVerifier
/**
 * Decommissioning using a post-configured include hosts file: nodes running
 * before the include file exists must be shut down once the list is applied.
 */
@Test
public void testAddNewIncludePathToConfiguration() throws Exception {
  Configuration conf = new Configuration();
  rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("host1:1234", 5120);
  MockNM nm2 = rm.registerNode("host2:5678", 10240);
  ClusterMetrics metrics = ClusterMetrics.getMetrics();
  assert (metrics != null);
  int initialMetricCount = metrics.getNumDecommisionedNMs();
  // Both nodes heartbeat normally before any include file is configured.
  NodeHeartbeatResponse heartbeat = nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL, heartbeat.getNodeAction());
  heartbeat = nm2.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL, heartbeat.getNodeAction());
  // Restrict the cluster to host1 only and refresh the node list.
  writeToHostsFile("host1");
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
  rm.getNodesListManager().refreshNodes(conf);
  heartbeat = nm1.nodeHeartbeat(true);
  Assert.assertEquals("Node should not have been decomissioned.",
      NodeAction.NORMAL, heartbeat.getNodeAction());
  heartbeat = nm2.nodeHeartbeat(true);
  Assert.assertEquals("Node should have been decomissioned but is in state"
      + heartbeat.getNodeAction(), NodeAction.SHUTDOWN, heartbeat.getNodeAction());
  checkDecommissionedNMCount(rm, ++initialMetricCount);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testReboot() throws Exception {
Configuration conf=new Configuration();
rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:1234",2048);
int initialMetricCount=ClusterMetrics.getMetrics().getNumRebootedNMs();
NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat=nm2.nodeHeartbeat(new HashMap>(),true,-100);
Assert.assertTrue(NodeAction.RESYNC.equals(nodeHeartbeat.getNodeAction()));
Assert.assertEquals("Too far behind rm response id:0 nm response id:-100",nodeHeartbeat.getDiagnosticsMessage());
checkRebootedNMCount(rm,++initialMetricCount);
}
InternalCallVerifier EqualityVerifier
/**
 * Registration must be rejected with SHUTDOWN until the node advertises at
 * least the scheduler's minimum allocation in both memory and vcores.
 */
@Test
public void testNodeRegistrationWithMinimumAllocations() throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, "2048");
  conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, "4");
  rm = new MockRM(conf);
  rm.start();
  ResourceTrackerService tracker = rm.getResourceTrackerService();
  RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class);
  request.setNodeId(BuilderUtils.newNodeId("host", 1234));
  // Too little memory and too few vcores: rejected.
  Resource resource = BuilderUtils.newResource(1024, 1);
  request.setResource(resource);
  RegisterNodeManagerResponse response1 = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.SHUTDOWN, response1.getNodeAction());
  // Enough memory but still too few vcores: rejected.
  resource.setMemory(2048);
  resource.setVirtualCores(1);
  request.setResource(resource);
  RegisterNodeManagerResponse response2 = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.SHUTDOWN, response2.getNodeAction());
  // Enough vcores but too little memory: rejected.
  resource.setMemory(1024);
  resource.setVirtualCores(4);
  request.setResource(resource);
  RegisterNodeManagerResponse response3 = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.SHUTDOWN, response3.getNodeAction());
  // Both at the configured minimum: accepted.
  resource.setMemory(2048);
  resource.setVirtualCores(4);
  request.setResource(resource);
  RegisterNodeManagerResponse response4 = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.NORMAL, response4.getNodeAction());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Decommissioning using a pre-configured include hosts file
 */
// Three nodes start on the include list; after the list is rewritten to only
// host1 and localhost's resolved IP, host2 alone must be decommissioned.
@Test public void testDecommissionWithIncludeHosts() throws Exception {
writeToHostsFile("localhost","host1","host2");
Configuration conf=new Configuration();
conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",10240);
MockNM nm3=rm.registerNode("localhost:4433",1024);
ClusterMetrics metrics=ClusterMetrics.getMetrics();
assert (metrics != null);
int metricCount=metrics.getNumDecommisionedNMs();
// All three nodes heartbeat normally while still on the include list.
NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat=nm3.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
// Shrink the include list to host1 plus localhost's resolved IP and refresh.
String ip=NetUtils.normalizeHostName("localhost");
writeToHostsFile("host1",ip);
rm.getNodesListManager().refreshNodes(conf);
nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
Assert.assertEquals(0,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
// host2 dropped off the include list: it must be told to shut down.
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertTrue("Node is not decommisioned.",NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
checkDecommissionedNMCount(rm,++metricCount);
// nm3 stays included (via the IP entry); the metric must not move again.
nodeHeartbeat=nm3.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
Assert.assertEquals(metricCount,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
}
InternalCallVerifier EqualityVerifier
/**
 * Test RM read NM next heartBeat Interval correctly from Configuration file,
 * and NM get next heartBeat Interval from RM correctly
 */
@Test(timeout = 50000)
public void testGetNextHeartBeatInterval() throws Exception {
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "4000");
  rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("host1:1234", 5120);
  MockNM nm2 = rm.registerNode("host2:5678", 10240);
  // Both nodes must be handed the configured 4000ms interval.
  NodeHeartbeatResponse firstResponse = nm1.nodeHeartbeat(true);
  Assert.assertEquals(4000, firstResponse.getNextHeartBeatInterval());
  NodeHeartbeatResponse secondResponse = nm2.nodeHeartbeat(true);
  Assert.assertEquals(4000, secondResponse.getNextHeartBeatInterval());
}
InternalCallVerifier EqualityVerifier
/**
 * The registration response must carry the RM cluster timestamp as the RM
 * identifier handed to the NodeManager.
 */
@Test
public void testSetRMIdentifierInRegistration() throws Exception {
  Configuration conf = new Configuration();
  rm = new MockRM(conf);
  rm.start();
  MockNM node = new MockNM("host1:1234", 5120, rm.getResourceTrackerService());
  RegisterNodeManagerResponse registration = node.registerNode();
  Assert.assertEquals(ResourceManager.getClusterTimeStamp(),
      registration.getRMIdentifier());
}
InternalCallVerifier EqualityVerifier
/**
 * Decommissioning using a post-configured exclude hosts file: a node added to
 * the exclude list after startup must be shut down on refresh.
 */
@Test
public void testAddNewExcludePathToConfiguration() throws Exception {
  Configuration conf = new Configuration();
  rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("host1:1234", 5120);
  MockNM nm2 = rm.registerNode("host2:5678", 10240);
  ClusterMetrics metrics = ClusterMetrics.getMetrics();
  assert (metrics != null);
  int initialMetricCount = metrics.getNumDecommisionedNMs();
  // Both nodes heartbeat normally before any exclude file exists.
  NodeHeartbeatResponse heartbeat = nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL, heartbeat.getNodeAction());
  heartbeat = nm2.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.NORMAL, heartbeat.getNodeAction());
  // Exclude host2 after startup and refresh the node lists.
  writeToHostsFile("host2");
  conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile.getAbsolutePath());
  rm.getNodesListManager().refreshNodes(conf);
  heartbeat = nm1.nodeHeartbeat(true);
  Assert.assertEquals("Node should not have been decomissioned.",
      NodeAction.NORMAL, heartbeat.getNodeAction());
  heartbeat = nm2.nodeHeartbeat(true);
  Assert.assertEquals("Node should have been decomissioned but is in state"
      + heartbeat.getNodeAction(), NodeAction.SHUTDOWN, heartbeat.getNodeAction());
  checkDecommissionedNMCount(rm, ++initialMetricCount);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A NodeManager reporting a version below the configured minimum must be
 * rejected with SHUTDOWN and a diagnostic explaining the version mismatch.
 */
@Test
public void testNodeRegistrationVersionLessThanRM() throws Exception {
  writeToHostsFile("host2");
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
  // Require NMs to be at least the RM's own version.
  conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION, "EqualToRM");
  rm = new MockRM(conf);
  rm.start();
  String nmVersion = "1.9.9";
  ResourceTrackerService tracker = rm.getResourceTrackerService();
  RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class);
  request.setResource(BuilderUtils.newResource(1024, 1));
  request.setNodeId(NodeId.newInstance("host2", 1234));
  request.setHttpPort(1234);
  request.setNMVersion(nmVersion);
  RegisterNodeManagerResponse response = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.SHUTDOWN, response.getNodeAction());
  Assert.assertTrue("Diagnostic message did not contain: 'Disallowed NodeManager "
      + "Version " + nmVersion + ", is less than the minimum version'",
      response.getDiagnosticsMessage().contains(
          "Disallowed NodeManager Version " + nmVersion
              + ", is less than the minimum version "));
}
InternalCallVerifier EqualityVerifier
/**
 * A node on the include list registering with the RM's own version must be
 * accepted (NORMAL).
 */
@Test
public void testNodeRegistrationSuccess() throws Exception {
  writeToHostsFile("host2");
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
  rm = new MockRM(conf);
  rm.start();
  ResourceTrackerService tracker = rm.getResourceTrackerService();
  RegisterNodeManagerRequest request = Records.newRecord(RegisterNodeManagerRequest.class);
  request.setResource(BuilderUtils.newResource(1024, 1));
  request.setNodeId(NodeId.newInstance("host2", 1234));
  request.setHttpPort(1234);
  // Report exactly the RM's version so the minimum-version check passes.
  request.setNMVersion(YarnVersionInfo.getVersion());
  RegisterNodeManagerResponse response = tracker.registerNodeManager(request);
  Assert.assertEquals(NodeAction.NORMAL, response.getNodeAction());
}
InternalCallVerifier BooleanVerifier
/**
 * Decommissioning using a pre-configured exclude hosts file
 */
// The exclude file starts empty; after adding host2 and localhost's resolved
// IP and refreshing, those two nodes must be shut down and the metric must
// rise by exactly 2.
@Test public void testDecommissionWithExcludeHosts() throws Exception {
Configuration conf=new Configuration();
conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,hostFile.getAbsolutePath());
// Start with an empty exclude list so all registrations succeed.
writeToHostsFile("");
rm=new MockRM(conf);
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",10240);
MockNM nm3=rm.registerNode("localhost:4433",1024);
int metricCount=ClusterMetrics.getMetrics().getNumDecommisionedNMs();
NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
// Exclude host2 and localhost (by resolved IP), then refresh the node lists.
String ip=NetUtils.normalizeHostName("localhost");
writeToHostsFile("host2",ip);
rm.getNodesListManager().refreshNodes(conf);
// Both excluded nodes count as decommissioned immediately after the refresh.
checkDecommissionedNMCount(rm,metricCount + 2);
nodeHeartbeat=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction()));
nodeHeartbeat=nm2.nodeHeartbeat(true);
Assert.assertTrue("The decommisioned metrics are not updated",NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
// nm3 runs on localhost, whose IP is in the exclude list, so it is shut down too.
nodeHeartbeat=nm3.nodeHeartbeat(true);
Assert.assertTrue("The decommisioned metrics are not updated",NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction()));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises NM reconnection: re-registering the same node (same or larger
// capability) must keep the active-NM count stable and update the available
// memory, while an unhealthy node stays counted as unhealthy across
// reconnects until it reports healthy.
@Test public void testReconnectNode() throws Exception {
final DrainDispatcher dispatcher=new DrainDispatcher();
// MockRM with an inline scheduler-event path and a drainable dispatcher so
// the test can deterministically wait for event processing via await().
rm=new MockRM(){
@Override protected EventHandler createSchedulerEventDispatcher(){
return new SchedulerEventDispatcher(this.scheduler){
@Override public void handle( SchedulerEvent event){
// Deliver scheduler events synchronously instead of via the async queue.
scheduler.handle(event);
}
}
;
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
}
;
rm.start();
MockNM nm1=rm.registerNode("host1:1234",5120);
MockNM nm2=rm.registerNode("host2:5678",5120);
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(false);
dispatcher.await();
// nm2 reported unhealthy; it must show up in the unhealthy count.
checkUnealthyNMCount(rm,nm2,true,1);
final int expectedNMs=ClusterMetrics.getMetrics().getNumActiveNMs();
QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics();
// Only nm1's 5120MB is available while nm2 is unhealthy.
Assert.assertEquals(5120,metrics.getAvailableMB());
// Reconnect of a healthy node: active count and health state are unchanged.
nm1=rm.registerNode("host1:1234",5120);
NodeHeartbeatResponse response=nm1.nodeHeartbeat(true);
Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
dispatcher.await();
Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs());
checkUnealthyNMCount(rm,nm2,true,1);
// Reconnect of the unhealthy node, still reporting unhealthy.
nm2=rm.registerNode("host2:5678",5120);
response=nm2.nodeHeartbeat(false);
Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
dispatcher.await();
Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs());
checkUnealthyNMCount(rm,nm2,true,1);
// Reconnect again and turn healthy: both nodes' memory becomes available.
nm2=rm.registerNode("host2:5678",5120);
dispatcher.await();
response=nm2.nodeHeartbeat(true);
response=nm2.nodeHeartbeat(true);
dispatcher.await();
Assert.assertEquals(5120 + 5120,metrics.getAvailableMB());
// Reconnect with an increased capability; available MB must reflect it.
nm1=rm.registerNode("host2:5678",10240);
dispatcher.await();
response=nm1.nodeHeartbeat(true);
dispatcher.await();
Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction()));
Assert.assertEquals(5120 + 10240,metrics.getAvailableMB());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Submitting with an application id that was already saved in the state store
 * must be idempotent across failover: the same application is returned rather
 * than a new one being created.
 */
@Test(timeout = 5000)
public void testHandleRMHADuringSubmitApplicationCallWithSavedApplicationState()
    throws Exception {
  startRMs();
  RMApp app0 = rm1.submitApp(200);
  explicitFailover();
  // rm2 recovered the app from the store during failover.
  Assert.assertTrue(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId()));
  int maxAttempts = configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  // Resubmit with the recovered application id; must map to the same app.
  RMApp app1 = rm2.submitApp(200, "", user, null, false, null, maxAttempts,
      null, null, false, false, true, app0.getApplicationId());
  Assert.assertEquals(app1.getApplicationId(), app0.getApplicationId());
}
InternalCallVerifier BooleanVerifier
/**
 * If an application was never persisted before failover, resubmitting with
 * its id must create and accept it on the newly active RM.
 */
@Test(timeout = 5000)
public void testHandleRMHADuringSubmitApplicationCallWithoutSavedApplicationState()
    throws Exception {
  startRMsWithCustomizedRMAppManager();
  int maxAttempts = configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  RMApp app0 = rm1.submitApp(200, "", user, null, false, null, maxAttempts,
      null, null, false, false);
  explicitFailover();
  // The app was never saved, so rm2 knows nothing about it after failover.
  Assert.assertFalse(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId()));
  // Resubmitting with the original id must register the app on rm2.
  RMApp app1 = rm2.submitApp(200, "", user, null, false, null, maxAttempts,
      null, null, false, false, true, app0.getApplicationId());
  verifySubmitApp(rm2, app1, app0.getApplicationId());
  Assert.assertTrue(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId()));
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test multiple calls of getApplicationReport, to make sure
 * it is idempotent
 */
@Test
public void testGetApplicationReportIdempotent() throws Exception {
  startRMs();
  RMApp app = rm1.submitApp(200);
  ApplicationReport report1 = rm1.getApplicationReport(app.getApplicationId());
  Assert.assertTrue(report1.getYarnApplicationState() == YarnApplicationState.ACCEPTED
      || report1.getYarnApplicationState() == YarnApplicationState.SUBMITTED);
  // A second call on the same RM returns the same id and state.
  ApplicationReport report2 = rm1.getApplicationReport(app.getApplicationId());
  Assert.assertEquals(report1.getApplicationId(), report2.getApplicationId());
  Assert.assertEquals(report1.getYarnApplicationState(),
      report2.getYarnApplicationState());
  explicitFailover();
  // After failover the recovered app reports the same id and state...
  ApplicationReport report3 = rm2.getApplicationReport(app.getApplicationId());
  Assert.assertEquals(report1.getApplicationId(), report3.getApplicationId());
  Assert.assertEquals(report1.getYarnApplicationState(),
      report3.getYarnApplicationState());
  // ...and stays stable across repeated calls on the new active RM.
  ApplicationReport report4 = rm2.getApplicationReport(app.getApplicationId());
  Assert.assertEquals(report3.getApplicationId(), report4.getApplicationId());
  Assert.assertEquals(report3.getYarnApplicationState(),
      report4.getYarnApplicationState());
}
InternalCallVerifier BooleanVerifier
/**
 * A saved application must be visible (ACCEPTED or SUBMITTED) through
 * getApplicationReport on the newly active RM after failover.
 */
@Test
public void testHandleRMHAafterSubmitApplicationCallWithSavedApplicationState()
    throws Exception {
  startRMs();
  RMApp app0 = rm1.submitApp(200);
  explicitFailover();
  ApplicationReport appReport = rm2.getApplicationReport(app0.getApplicationId());
  YarnApplicationState state = appReport.getYarnApplicationState();
  Assert.assertTrue(state == YarnApplicationState.ACCEPTED
      || state == YarnApplicationState.SUBMITTED);
}
InternalCallVerifier BooleanVerifier
/**
 * Verifies that the AM-container flag survives RM restart: after the NM
 * re-registers with the app's container statuses, the recovered master
 * container must still be marked as an AM container in the scheduler.
 */
@Test(timeout = 30000)
public void testAMContainerStatusWithRMRestart() throws Exception {
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app1_1 = rm1.submitApp(1024);
  MockAM am1_1 = MockRM.launchAndRegisterAM(app1_1, rm1, nm1);
  RMAppAttempt attempt0 = app1_1.getCurrentAppAttempt();
  AbstractYarnScheduler scheduler = ((AbstractYarnScheduler) rm1.getResourceScheduler());
  Assert.assertTrue(
      scheduler.getRMContainer(attempt0.getMasterContainer().getId()).isAMContainer());
  // Restart the RM on the same store and point the NM at it.
  rm2 = new MockRM(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  // Typed list (was a raw List) of the AM's containers to report on
  // re-registration.
  List<NMContainerStatus> am1_1Containers = createNMContainerStatusForApp(am1_1);
  nm1.registerNode(am1_1Containers, null);
  waitForNumContainersToRecover(2, rm2, am1_1.getApplicationAttemptId());
  // The recovered master container must keep its AM flag.
  scheduler = ((AbstractYarnScheduler) rm2.getResourceScheduler());
  Assert.assertTrue(
      scheduler.getRMContainer(attempt0.getMasterContainer().getId()).isAMContainer());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// After an application has FINISHED before restart, container reports from a
// re-registering NM must NOT be recovered into the scheduler.
@Test(timeout=20000) public void testContainersNotRecoveredForCompletedApps() throws Exception {
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
// Finish the application completely before the restart.
MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am1);
rm2=new MockRM(conf,memStore);
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// The NM re-registers, reporting containers for the already-finished app.
NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
nm1.registerNode(Arrays.asList(runningContainer,completedContainer),null);
RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
assertEquals(RMAppState.FINISHED,recoveredApp1.getState());
// Give the async recovery path time to (incorrectly) add containers, if any.
Thread.sleep(3000);
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
// Neither container may appear in the scheduler for a completed app.
assertNull(scheduler.getRMContainer(runningContainer.getContainerId()));
assertNull(scheduler.getRMContainer(completedContainer.getContainerId()));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verifies that the scheduler application and attempt are recovered
 * synchronously on RM start, so scheduler state is queryable immediately
 * after start() and before the NM re-registers.
 */
@Test(timeout = 20000)
public void testRecoverSchedulerAppAndAttemptSynchronously() throws Exception {
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app0 = rm1.submitApp(200);
  MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
  rm2 = new MockRM(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  // The app/attempt must already exist in the scheduler right after
  // rm2.start(), before the node has re-registered.
  Assert.assertNotNull(
      rm2.getResourceScheduler().getSchedulerAppInfo(am0.getApplicationAttemptId()));
  // Must not throw even though no containers have been reported yet.
  ((AbstractYarnScheduler) rm2.getResourceScheduler())
      .getTransferredContainers(am0.getApplicationAttemptId());
  // Typed list (was a raw List) of the attempt's containers.
  List<NMContainerStatus> containers = createNMContainerStatusForApp(am0);
  nm1.registerNode(containers, null);
  waitForNumContainersToRecover(2, rm2, am0.getApplicationAttemptId());
}
APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Work-preserving recovery: after RM restart and NM re-registration with one
 * AM container, one running container, and one completed container, the
 * scheduler's node, queue, application, and attempt state must all reflect
 * exactly the two live containers.
 */
@Test(timeout = 20000)
public void testSchedulerRecovery() throws Exception {
  conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
  conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
      DominantResourceCalculator.class.getName());
  int containerMemory = 1024;
  Resource containerResource = Resource.newInstance(containerMemory, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  rm1 = new MockRM(conf, memStore);
  rm1.start();
  MockNM nm1 = new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app1 = rm1.submitApp(200);
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  rm1.clearQueueMetrics(app1);
  // Restart the RM on the same store and point the NM at it.
  rm2 = new MockRM(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  RMApp recoveredApp1 = rm2.getRMContext().getRMApps().get(app1.getApplicationId());
  RMAppAttempt loadedAttempt1 = recoveredApp1.getCurrentAppAttempt();
  // Report the AM container, one running container, one completed container.
  NMContainerStatus amContainer = TestRMRestart.createNMContainerStatus(
      am1.getApplicationAttemptId(), 1, ContainerState.RUNNING);
  NMContainerStatus runningContainer = TestRMRestart.createNMContainerStatus(
      am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
  NMContainerStatus completedContainer = TestRMRestart.createNMContainerStatus(
      am1.getApplicationAttemptId(), 3, ContainerState.COMPLETE);
  nm1.registerNode(Arrays.asList(amContainer, runningContainer, completedContainer),
      null);
  waitForNumContainersToRecover(2, rm2, am1.getApplicationAttemptId());
  // Typed set (was a raw Set) of container ids launched on the node.
  Set<ContainerId> launchedContainers = ((RMNodeImpl) rm2.getRMContext()
      .getRMNodes().get(nm1.getNodeId())).getLaunchedContainers();
  assertTrue(launchedContainers.contains(amContainer.getContainerId()));
  assertTrue(launchedContainers.contains(runningContainer.getContainerId()));
  rm2.waitForState(nm1, amContainer.getContainerId(), RMContainerState.RUNNING);
  rm2.waitForState(nm1, runningContainer.getContainerId(), RMContainerState.RUNNING);
  rm2.waitForContainerToComplete(loadedAttempt1, completedContainer);
  // Node-level accounting: two live containers' worth of resources used.
  AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm2.getResourceScheduler();
  SchedulerNode schedulerNode1 = scheduler.getSchedulerNode(nm1.getNodeId());
  Resource usedResources = Resources.multiply(containerResource, 2);
  Resource nmResource = Resource.newInstance(nm1.getMemory(), nm1.getvCores());
  assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId()));
  assertTrue(schedulerNode1.isValidContainer(runningContainer.getContainerId()));
  assertFalse(schedulerNode1.isValidContainer(completedContainer.getContainerId()));
  assertEquals(2, schedulerNode1.getNumContainers());
  assertEquals(Resources.subtract(nmResource, usedResources),
      schedulerNode1.getAvailableResource());
  assertEquals(usedResources, schedulerNode1.getUsedResource());
  Resource availableResources = Resources.subtract(nmResource, usedResources);
  // Queue-level checks differ per scheduler implementation under test.
  // (Typed map; the raw Map did not compile for the SchedulerApplication
  // lookup below.)
  Map<ApplicationId, SchedulerApplication> schedulerApps =
      ((AbstractYarnScheduler) rm2.getResourceScheduler()).getSchedulerApplications();
  SchedulerApplication schedulerApp = schedulerApps.get(recoveredApp1.getApplicationId());
  if (schedulerClass.equals(CapacityScheduler.class)) {
    checkCSQueue(rm2, schedulerApp, nmResource, nmResource, usedResources, 2);
  } else if (schedulerClass.equals(FifoScheduler.class)) {
    checkFifoQueue(schedulerApp, usedResources, availableResources);
  }
  // Attempt-level checks: live containers, consumption, headroom, next id.
  SchedulerApplicationAttempt schedulerAttempt = schedulerApp.getCurrentAppAttempt();
  assertTrue(schedulerAttempt.getLiveContainers()
      .contains(scheduler.getRMContainer(amContainer.getContainerId())));
  assertTrue(schedulerAttempt.getLiveContainers()
      .contains(scheduler.getRMContainer(runningContainer.getContainerId())));
  assertEquals(schedulerAttempt.getCurrentConsumption(), usedResources);
  if (scheduler.getClass() != FairScheduler.class) {
    assertEquals(availableResources, schedulerAttempt.getHeadroom());
  }
  // New container ids continue from the recovered epoch: (1 << 22) + 1.
  assertEquals((1 << 22) + 1, schedulerAttempt.getNewContainerId());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Restarts the RM while node managers still report containers belonging to a
 * failed AM attempt, and verifies the scheduler refuses to recover any
 * container of that dead attempt — including one reported late by a second NM.
 */
@Test(timeout=20000) public void testAMfailedBetweenRMRestart() throws Exception {
  MemoryRMStateStore stateStore=new MemoryRMStateStore();
  stateStore.init(conf);
  rm1=new MockRM(conf,stateStore);
  rm1.start();
  MockNM nodeManager=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService());
  nodeManager.registerNode();
  RMApp application=rm1.submitApp(200);
  MockAM appMaster=MockRM.launchAndRegisterAM(application,rm1,nodeManager);
  // Bring up a second RM on the same state store and repoint the NM at it.
  rm2=new MockRM(conf,stateStore);
  rm2.start();
  nodeManager.setResourceTrackerService(rm2.getResourceTrackerService());
  NMContainerStatus amStatus=TestRMRestart.createNMContainerStatus(appMaster.getApplicationAttemptId(),1,ContainerState.COMPLETE);
  NMContainerStatus runningStatus=TestRMRestart.createNMContainerStatus(appMaster.getApplicationAttemptId(),2,ContainerState.RUNNING);
  NMContainerStatus completedStatus=TestRMRestart.createNMContainerStatus(appMaster.getApplicationAttemptId(),3,ContainerState.COMPLETE);
  nodeManager.registerNode(Arrays.asList(amStatus,runningStatus,completedStatus),null);
  // The AM container is reported COMPLETE, so the recovered attempt fails.
  rm2.waitForState(appMaster.getApplicationAttemptId(),RMAppAttemptState.FAILED);
  Thread.sleep(3000);
  AbstractYarnScheduler yarnScheduler=(AbstractYarnScheduler)rm2.getResourceScheduler();
  // Containers of the failed attempt must not be recovered by the scheduler.
  assertNull(yarnScheduler.getRMContainer(runningStatus.getContainerId()));
  assertNull(yarnScheduler.getRMContainer(completedStatus.getContainerId()));
  rm2.waitForNewAMToLaunchAndRegister(application.getApplicationId(),2,nodeManager);
  // A second NM registers late, reporting a leftover container of the old attempt.
  MockNM lateNodeManager=new MockNM("127.1.1.1:4321",8192,rm2.getResourceTrackerService());
  NMContainerStatus staleStatus=TestRMRestart.createNMContainerStatus(appMaster.getApplicationAttemptId(),4,ContainerState.RUNNING);
  lateNodeManager.registerNode(Arrays.asList(staleStatus),null);
  Thread.sleep(3000);
  // The stale container from the previous attempt must also be ignored.
  assertNull(yarnScheduler.getRMContainer(staleStatus.getContainerId()));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes container start/finish events through the history writer and polls
 * the history store until each becomes visible, then checks persisted fields.
 */
@Test public void testWriteContainer() throws Exception {
  RMContainer container=createRMContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1));
  writer.containerStarted(container);
  // The writer persists asynchronously: poll until the start record appears
  // (bounded by MAX_RETRIES sleeps of 100ms).
  ContainerHistoryData containerHD=null;
  for (int i=0; i < MAX_RETRIES; ++i) {
    containerHD=store.getContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1));
    if (containerHD != null) {
      break;
    } else {
      Thread.sleep(100);
    }
  }
  Assert.assertNotNull(containerHD);
  Assert.assertEquals(NodeId.newInstance("test host",-100),containerHD.getAssignedNode());
  Assert.assertEquals(Resource.newInstance(-1,-1),containerHD.getAllocatedResource());
  Assert.assertEquals(Priority.UNDEFINED,containerHD.getPriority());
  // BUGFIX: assert against the persisted history record, not the mock
  // container itself — the original line checked container.getCreationTime(),
  // which only exercised the stub and never the store.
  Assert.assertEquals(0L,containerHD.getStartTime());
  writer.containerFinished(container);
  // Poll again until the finish event (container state) has been recorded.
  for (int i=0; i < MAX_RETRIES; ++i) {
    containerHD=store.getContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1));
    if (containerHD.getContainerState() != null) {
      break;
    } else {
      Thread.sleep(100);
    }
  }
  Assert.assertEquals("test diagnostics info",containerHD.getDiagnosticsInfo());
  Assert.assertEquals(-1,containerHD.getContainerExitStatus());
  Assert.assertEquals(ContainerState.COMPLETE,containerHD.getContainerState());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes application-attempt start/finish events and polls the history store
 * until each becomes visible, then checks the persisted attempt fields.
 */
@Test public void testWriteApplicationAttempt() throws Exception {
  RMAppAttempt attempt=createRMAppAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1));
  writer.applicationAttemptStarted(attempt);
  ApplicationAttemptHistoryData historyData=null;
  // The writer is asynchronous: poll until the start record is stored.
  int retry=0;
  while (retry < MAX_RETRIES) {
    historyData=store.getApplicationAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1));
    if (historyData != null) {
      break;
    }
    Thread.sleep(100);
    retry++;
  }
  Assert.assertNotNull(historyData);
  Assert.assertEquals("test host",historyData.getHost());
  Assert.assertEquals(-100,historyData.getRPCPort());
  Assert.assertEquals(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1),historyData.getMasterContainerId());
  writer.applicationAttemptFinished(attempt,RMAppAttemptState.FINISHED);
  // Poll again until the finish event (attempt state) has been recorded.
  retry=0;
  while (retry < MAX_RETRIES) {
    historyData=store.getApplicationAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1));
    if (historyData.getYarnApplicationAttemptState() != null) {
      break;
    }
    Thread.sleep(100);
    retry++;
  }
  Assert.assertEquals("test diagnostics info",historyData.getDiagnosticsInfo());
  Assert.assertEquals("test url",historyData.getTrackingURL());
  Assert.assertEquals(FinalApplicationStatus.UNDEFINED,historyData.getFinalApplicationStatus());
  Assert.assertEquals(YarnApplicationAttemptState.FINISHED,historyData.getYarnApplicationAttemptState());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Writes application start/finish events and polls the history store until
 * each becomes visible, then checks the persisted application fields.
 */
@Test public void testWriteApplication() throws Exception {
  RMApp app=createRMApp(ApplicationId.newInstance(0,1));
  writer.applicationStarted(app);
  ApplicationHistoryData historyData=null;
  // The writer is asynchronous: poll until the start record is stored.
  int retry=0;
  while (retry < MAX_RETRIES) {
    historyData=store.getApplication(ApplicationId.newInstance(0,1));
    if (historyData != null) {
      break;
    }
    Thread.sleep(100);
    retry++;
  }
  Assert.assertNotNull(historyData);
  Assert.assertEquals("test app",historyData.getApplicationName());
  Assert.assertEquals("test app type",historyData.getApplicationType());
  Assert.assertEquals("test user",historyData.getUser());
  Assert.assertEquals("test queue",historyData.getQueue());
  Assert.assertEquals(0L,historyData.getSubmitTime());
  Assert.assertEquals(1L,historyData.getStartTime());
  writer.applicationFinished(app,RMAppState.FINISHED);
  // Poll again until the finish event (application state) has been recorded.
  retry=0;
  while (retry < MAX_RETRIES) {
    historyData=store.getApplication(ApplicationId.newInstance(0,1));
    if (historyData.getYarnApplicationState() != null) {
      break;
    }
    Thread.sleep(100);
    retry++;
  }
  Assert.assertEquals(2L,historyData.getFinishTime());
  Assert.assertEquals("test diagnostics info",historyData.getDiagnosticsInfo());
  Assert.assertEquals(FinalApplicationStatus.UNDEFINED,historyData.getFinalApplicationStatus());
  Assert.assertEquals(YarnApplicationState.FINISHED,historyData.getYarnApplicationState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies that AllocateResponse#getUpdatedNodes reports node state changes
 * (UNHEALTHY, LOST, back to RUNNING) to each application while the change is
 * unacknowledged, and independently per application.
 */
@Test public void testAMRMUnusableNodes() throws Exception {
MockNM nm1=rm.registerNode("127.0.0.1:1234",10000);
MockNM nm2=rm.registerNode("127.0.0.2:1234",10000);
MockNM nm3=rm.registerNode("127.0.0.3:1234",10000);
MockNM nm4=rm.registerNode("127.0.0.4:1234",10000);
dispatcher.await();
RMApp app1=rm.submitApp(2000);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
// Baseline: no node changes yet, so the first allocate reports none.
AllocateRequest allocateRequest1=AllocateRequest.newInstance(0,0F,null,null,null);
AllocateResponse response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
List updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(0,updatedNodes.size());
// nm4 turns unhealthy; the next allocate must report it as UNHEALTHY.
syncNodeHeartbeat(nm4,false);
allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null);
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
NodeReport nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.UNHEALTHY,nr.getNodeState());
// Re-sending the same (stale) request returns the same update again.
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.UNHEALTHY,nr.getNodeState());
// nm3 is lost; a fresh allocate reports exactly that transition.
syncNodeLost(nm3);
allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null);
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm3.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.LOST,nr.getNodeState());
// Start a second application; it has no pending node updates of its own.
RMApp app2=rm.submitApp(2000);
nm2.nodeHeartbeat(true);
RMAppAttempt attempt2=app2.getCurrentAppAttempt();
MockAM am2=rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
AllocateRequest allocateRequest2=AllocateRequest.newInstance(0,0F,null,null,null);
AllocateResponse response2=allocate(attempt2.getAppAttemptId(),allocateRequest2);
updatedNodes=response2.getUpdatedNodes();
Assert.assertEquals(0,updatedNodes.size());
// nm4 recovers; each application should independently see it RUNNING once.
syncNodeHeartbeat(nm4,true);
allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null);
response1=allocate(attempt1.getAppAttemptId(),allocateRequest1);
updatedNodes=response1.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.RUNNING,nr.getNodeState());
allocateRequest2=AllocateRequest.newInstance(response2.getResponseId(),0F,null,null,null);
response2=allocate(attempt2.getAppAttemptId(),allocateRequest2);
updatedNodes=response2.getUpdatedNodes();
Assert.assertEquals(1,updatedNodes.size());
nr=updatedNodes.iterator().next();
Assert.assertEquals(nm4.getNodeId(),nr.getNodeId());
Assert.assertEquals(NodeState.RUNNING,nr.getNodeState());
// Once acknowledged (advanced response id), no further updates are reported.
allocateRequest2=AllocateRequest.newInstance(response2.getResponseId(),0F,null,null,null);
response2=allocate(attempt2.getAppAttemptId(),allocateRequest2);
updatedNodes=response2.getUpdatedNodes();
Assert.assertEquals(0,updatedNodes.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies allocate-call response-id bookkeeping: the id increments on each
 * new request, a duplicate request returns the previous response id, and a
 * stale (reset-to-0) id triggers an AM resync command.
 */
@Test public void testARRMResponseId() throws Exception {
  MockNM nm1=rm.registerNode("h1:1234",5000);
  RMApp app=rm.submitApp(2000);
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt=app.getCurrentAppAttempt();
  MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  AllocateRequest allocateRequest=AllocateRequest.newInstance(0,0F,null,null,null);
  AllocateResponse response=allocate(attempt.getAppAttemptId(),allocateRequest);
  Assert.assertEquals(1,response.getResponseId());
  // assertNull gives a clearer failure message than assertTrue(x == null).
  Assert.assertNull(response.getAMCommand());
  allocateRequest=AllocateRequest.newInstance(response.getResponseId(),0F,null,null,null);
  response=allocate(attempt.getAppAttemptId(),allocateRequest);
  Assert.assertEquals(2,response.getResponseId());
  // Re-sending the same request must not advance the response id.
  response=allocate(attempt.getAppAttemptId(),allocateRequest);
  Assert.assertEquals(2,response.getResponseId());
  // A request with a stale response id (0) makes the RM ask the AM to resync.
  allocateRequest=AllocateRequest.newInstance(0,0F,null,null,null);
  response=allocate(attempt.getAppAttemptId(),allocateRequest);
  // assertSame preserves the original reference (==) comparison on the enum.
  Assert.assertSame(AMCommand.AM_RESYNC,response.getAMCommand());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Exercises work-preserving AM restart: when the AM fails, containers that
 * are actually running (2 and 3) survive and are handed to the new attempt,
 * while ACQUIRED (4), ALLOCATED (5) and reserved (6) containers are dropped
 * and surface as just-finished containers of the new attempt.
 */
@Test(timeout=30000) public void testAMRestartWithExistingContainers() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
MockRM rm1=new MockRM(conf);
rm1.start();
// NOTE(review): the trailing 'true' presumably enables keep-containers across
// attempts, matching the work-preserving behavior asserted below — confirm
// against MockRM#submitApp.
RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
MockNM nm1=new MockNM("127.0.0.1:1234",10240,rm1.getResourceTrackerService());
nm1.registerNode();
MockNM nm2=new MockNM("127.0.0.1:2351",4089,rm1.getResourceTrackerService());
nm2.registerNode();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
// Allocate three containers, heartbeating until all are handed out.
int NUM_CONTAINERS=3;
am1.allocate("127.0.0.1",1024,NUM_CONTAINERS,new ArrayList());
nm1.nodeHeartbeat(true);
List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
while (containers.size() != NUM_CONTAINERS) {
nm1.nodeHeartbeat(true);
containers.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers());
Thread.sleep(200);
}
// Launch containers 2 and 3 on the NM; container 4 stays merely ACQUIRED.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
ContainerId containerId4=ContainerId.newInstance(am1.getApplicationAttemptId(),4);
rm1.waitForState(nm1,containerId4,RMContainerState.ACQUIRED);
// Container 5 is allocated by the RM but never acquired by the AM.
am1.allocate("127.0.0.1",1024,1,new ArrayList());
nm1.nodeHeartbeat(true);
ContainerId containerId5=ContainerId.newInstance(am1.getApplicationAttemptId(),5);
rm1.waitForContainerAllocated(nm1,containerId5);
rm1.waitForState(nm1,containerId5,RMContainerState.ALLOCATED);
// Container 6 (6000MB) cannot be placed immediately and becomes a reservation.
am1.allocate("127.0.0.1",6000,1,new ArrayList());
ContainerId containerId6=ContainerId.newInstance(am1.getApplicationAttemptId(),6);
nm1.nodeHeartbeat(true);
SchedulerApplicationAttempt schedulerAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId6);
while (schedulerAttempt.getReservedContainers().isEmpty()) {
System.out.println("Waiting for container " + containerId6 + " to be reserved.");
nm1.nodeHeartbeat(true);
Thread.sleep(200);
}
Assert.assertEquals(containerId6,schedulerAttempt.getReservedContainers().get(0).getContainerId());
// Fail the AM by completing its container (id 1).
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
Thread.sleep(3000);
// Running container 2 survives; un-launched containers 4 and 5 are gone.
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId4));
Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId5));
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
// A fresh attempt is created for the restarted AM.
ApplicationAttemptId newAttemptId=app1.getCurrentAppAttempt().getAppAttemptId();
Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId()));
RMAppAttempt attempt2=app1.getCurrentAppAttempt();
nm1.nodeHeartbeat(true);
MockAM am2=rm1.sendAMLaunched(attempt2.getAppAttemptId());
RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
// The new AM is told about the two still-running containers (2 and 3).
Assert.assertEquals(2,registerResponse.getContainersFromPreviousAttempts().size());
boolean containerId2Exists=false, containerId3Exists=false;
for ( Container container : registerResponse.getContainersFromPreviousAttempts()) {
if (container.getId().equals(containerId2)) {
containerId2Exists=true;
}
if (container.getId().equals(containerId3)) {
containerId3Exists=true;
}
}
Assert.assertTrue(containerId2Exists && containerId3Exists);
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Complete container 3 under the old attempt id; its completion (plus the
// drops of 4, 5 and 6) must be reported to the NEW attempt.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE);
RMAppAttempt newAttempt=app1.getRMAppAttempt(am2.getApplicationAttemptId());
waitForContainersToFinish(4,newAttempt);
boolean container3Exists=false, container4Exists=false, container5Exists=false, container6Exists=false;
for ( ContainerStatus status : newAttempt.getJustFinishedContainers()) {
if (status.getContainerId().equals(containerId3)) {
container3Exists=true;
}
if (status.getContainerId().equals(containerId4)) {
container4Exists=true;
}
if (status.getContainerId().equals(containerId5)) {
container5Exists=true;
}
if (status.getContainerId().equals(containerId6)) {
container6Exists=true;
}
}
Assert.assertTrue(container3Exists && container4Exists && container5Exists&& container6Exists);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
SchedulerApplicationAttempt schedulerNewAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId2);
// Finishing the app must release the surviving container as well.
MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am2);
Assert.assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2));
System.out.println("New attempt's just finished containers: " + newAttempt.getJustFinishedContainers());
waitForContainersToFinish(5,newAttempt);
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies NM tokens issued to earlier AM attempts are transferred to the
 * registration response of later attempts, accumulating across restarts.
 */
@Test(timeout=30000) public void testNMTokensRebindOnAMRestart() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,3);
MockRM rm1=new MockRM(conf);
rm1.start();
RMApp app1=rm1.submitApp(200,"myname","myuser",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
MockNM nm2=new MockNM("127.1.1.1:4321",8000,rm1.getResourceTrackerService());
nm2.registerNode();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
// Allocate two containers on nm1, collecting the NM tokens handed out.
List containers=new ArrayList();
List expectedNMTokens=new ArrayList();
while (true) {
AllocateResponse response=am1.allocate("127.0.0.1",2000,2,new ArrayList());
nm1.nodeHeartbeat(true);
containers.addAll(response.getAllocatedContainers());
expectedNMTokens.addAll(response.getNMTokens());
if (containers.size() == 2) {
break;
}
Thread.sleep(200);
System.out.println("Waiting for container to be allocated.");
}
// Launch both containers so they survive the AM restart.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
// Fail attempt 1 by completing its AM container.
nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am1.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am2=MockRM.launchAM(app1,rm1,nm1);
RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Attempt 2 receives exactly the tokens issued to attempt 1.
Assert.assertEquals(expectedNMTokens,registerResponse.getNMTokensFromPreviousAttempts());
// Attempt 2 now allocates on nm2, adding a token for the second node.
containers=new ArrayList();
while (true) {
AllocateResponse allocateResponse=am2.allocate("127.1.1.1",4000,1,new ArrayList());
nm2.nodeHeartbeat(true);
containers.addAll(allocateResponse.getAllocatedContainers());
expectedNMTokens.addAll(allocateResponse.getNMTokens());
if (containers.size() == 1) {
break;
}
Thread.sleep(200);
System.out.println("Waiting for container to be allocated.");
}
nm1.nodeHeartbeat(am2.getApplicationAttemptId(),2,ContainerState.RUNNING);
ContainerId am2ContainerId2=ContainerId.newInstance(am2.getApplicationAttemptId(),2);
rm1.waitForState(nm1,am2ContainerId2,RMContainerState.RUNNING);
// Fail attempt 2 as well.
nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am2.waitForState(RMAppAttemptState.FAILED);
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am3=MockRM.launchAM(app1,rm1,nm1);
registerResponse=am3.registerAppAttempt();
rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
// Attempt 3 sees the accumulated tokens (2 entries covering both attempts).
List transferredTokens=registerResponse.getNMTokensFromPreviousAttempts();
Assert.assertEquals(2,transferredTokens.size());
Assert.assertTrue(transferredTokens.containsAll(expectedNMTokens));
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies that an AM container preempted before an RM restart does not count
 * towards the max-attempt retry limit, and that the PREEMPTED exit status is
 * recovered from the state store after restart.
 */
@Test(timeout=20000) public void testPreemptedAMRestartOnRMRestart() throws Exception {
  YarnConfiguration conf=new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
  conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
  MemoryRMStateStore memStore=new MemoryRMStateStore();
  memStore.init(conf);
  MockRM rm1=new MockRM(conf,memStore);
  rm1.start();
  MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app1=rm1.submitApp(200);
  RMAppAttempt attempt1=app1.getCurrentAppAttempt();
  MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
  // Preempt the AM container directly through the scheduler.
  CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler();
  ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1);
  scheduler.killContainer(scheduler.getRMContainer(amContainer));
  am1.waitForState(RMAppAttemptState.FAILED);
  // A preempted attempt must not be charged against RM_AM_MAX_ATTEMPTS.
  Assert.assertFalse(attempt1.shouldCountTowardsMaxAttemptRetry());
  rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
  ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
  Assert.assertEquals(1,appState.getAttemptCount());
  Assert.assertEquals(ContainerExitStatus.PREEMPTED,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
  // Restart the RM from the same state store and re-register the NM.
  MockRM rm2=new MockRM(conf,memStore);
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  // BUGFIX: start rm2 before the NM registers with it — the original test
  // called nm1.registerNode() against a not-yet-started rm2.
  rm2.start();
  nm1.registerNode();
  MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
  MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
  RMAppAttempt attempt2=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
  // The attempt that ran to completion does count towards the retry limit.
  Assert.assertTrue(attempt2.shouldCountTowardsMaxAttemptRetry());
  Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
  rm1.stop();
  rm2.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=100000) public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception {
YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
MemoryRMStateStore memStore=new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1=new MockRM(conf,memStore);
rm1.start();
MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
nm1.registerNode();
RMApp app1=rm1.submitApp(200);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler();
ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1);
scheduler.killContainer(scheduler.getRMContainer(amContainer));
am1.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
MockAM am2=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
RMAppAttempt attempt2=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt2).mayBeLastAttempt());
ContainerId amContainer2=ContainerId.newInstance(am2.getApplicationAttemptId(),1);
scheduler.killContainer(scheduler.getRMContainer(amContainer2));
am2.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt2.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am3=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),3,nm1);
RMAppAttempt attempt3=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt3).mayBeLastAttempt());
ContainerStatus containerStatus=Records.newRecord(ContainerStatus.class);
containerStatus.setContainerId(attempt3.getMasterContainer().getId());
containerStatus.setDiagnostics("mimic NM disk_failure");
containerStatus.setState(ContainerState.COMPLETE);
containerStatus.setExitStatus(ContainerExitStatus.DISKS_FAILED);
Map> conts=new HashMap>();
conts.put(app1.getApplicationId(),Collections.singletonList(containerStatus));
nm1.nodeHeartbeat(conts,true);
am3.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt3.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.DISKS_FAILED,appState.getAttempt(am3.getApplicationAttemptId()).getAMContainerExitStatus());
rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
MockAM am4=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),4,nm1);
RMAppAttempt attempt4=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt4).mayBeLastAttempt());
MockNM nm2=new MockNM("127.0.0.1:2234",8000,rm1.getResourceTrackerService());
nm2.registerNode();
nm1.nodeHeartbeat(false);
am4.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(!attempt4.shouldCountTowardsMaxAttemptRetry());
Assert.assertEquals(ContainerExitStatus.ABORTED,appState.getAttempt(am4.getApplicationAttemptId()).getAMContainerExitStatus());
nm2.nodeHeartbeat(true);
MockAM am5=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),5,nm2);
RMAppAttempt attempt5=app1.getCurrentAppAttempt();
Assert.assertTrue(((RMAppAttemptImpl)attempt5).mayBeLastAttempt());
nm2.nodeHeartbeat(am5.getApplicationAttemptId(),1,ContainerState.COMPLETE);
am5.waitForState(RMAppAttemptState.FAILED);
Assert.assertTrue(attempt5.shouldCountTowardsMaxAttemptRetry());
rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED);
Assert.assertEquals(5,app1.getAppAttempts().size());
rm1.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Checks that an AM container killed by the resource manager (as happens on
 * RM restart/failover) is not counted towards the AM failure limit.
 */
@Test(timeout=50000) public void testRMRestartOrFailoverNotCountedForAMFailures() throws Exception {
  YarnConfiguration conf=new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
  conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
  MemoryRMStateStore stateStore=new MemoryRMStateStore();
  stateStore.init(conf);
  MockRM firstRM=new MockRM(conf,stateStore);
  firstRM.start();
  MockNM nodeManager=new MockNM("127.0.0.1:1234",8000,firstRM.getResourceTrackerService());
  nodeManager.registerNode();
  RMApp app=firstRM.submitApp(200);
  MockAM firstAM=MockRM.launchAndRegisterAM(app,firstRM,nodeManager);
  RMAppAttempt firstAttempt=app.getCurrentAppAttempt();
  Assert.assertTrue(((RMAppAttemptImpl)firstAttempt).mayBeLastAttempt());
  // Restart the RM from the same state store.
  MockRM secondRM=new MockRM(conf,stateStore);
  secondRM.start();
  ApplicationState persistedState=stateStore.getState().getApplicationState().get(app.getApplicationId());
  nodeManager.setResourceTrackerService(secondRM.getResourceTrackerService());
  // Report the old AM container as killed by the resource manager itself.
  NMContainerStatus killedStatus=Records.newRecord(NMContainerStatus.class);
  killedStatus.setContainerExitStatus(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER);
  killedStatus.setContainerId(firstAttempt.getMasterContainer().getId());
  killedStatus.setContainerState(ContainerState.COMPLETE);
  killedStatus.setDiagnostics("");
  nodeManager.registerNode(Collections.singletonList(killedStatus),null);
  secondRM.waitForState(firstAttempt.getAppAttemptId(),RMAppAttemptState.FAILED);
  Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,persistedState.getAttempt(firstAM.getApplicationAttemptId()).getAMContainerExitStatus());
  secondRM.waitForState(app.getApplicationId(),RMAppState.ACCEPTED);
  MockAM secondAM=secondRM.waitForNewAMToLaunchAndRegister(app.getApplicationId(),2,nodeManager);
  MockRM.finishAMAndVerifyAppState(app,secondRM,nodeManager,secondAM);
  RMAppAttempt latestAttempt=secondRM.getRMContext().getRMApps().get(app.getApplicationId()).getCurrentAppAttempt();
  // The attempt that ran normally does count towards the retry limit.
  Assert.assertTrue(latestAttempt.shouldCountTowardsMaxAttemptRetry());
  Assert.assertEquals(ContainerExitStatus.INVALID,persistedState.getAttempt(secondAM.getApplicationAttemptId()).getAMContainerExitStatus());
  firstRM.stop();
  secondRM.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises the wait-before-kill preemption path: containers are first only
 * requested for preemption, and kill events are emitted once the configured
 * wait time has elapsed.
 */
@Test public void testExpireKill(){
  final long killWaitMs=10000L;
  int[][] queueData=new int[][]{{100,40,40,20},{100,100,100,100},{100,0,60,40},{10,10,0,0},{0,0,0,0},{3,1,1,1},{-1,1,1,1},{3,0,0,0}};
  conf.setLong(WAIT_TIME_BEFORE_KILL,killWaitMs);
  ProportionalCapacityPreemptionPolicy preemptionPolicy=buildPolicy(queueData);
  // At t=0 the policy should only request preemption of appC's containers.
  when(mClock.getTime()).thenReturn(0L);
  preemptionPolicy.editSchedule();
  verify(mDisp,times(10)).handle(argThat(new IsPreemptionRequestFor(appC)));
  // Halfway through the wait period: still preemption requests, no kills yet.
  when(mClock.getTime()).thenReturn(killWaitMs / 2);
  preemptionPolicy.editSchedule();
  verify(mDisp,times(20)).handle(argThat(new IsPreemptionRequestFor(appC)));
  // Past the wait period: the last batch of events must be kills for appC.
  when(mClock.getTime()).thenReturn(killWaitMs + 1);
  preemptionPolicy.editSchedule();
  verify(mDisp,times(30)).handle(evtCaptor.capture());
  List capturedEvents=evtCaptor.getAllValues();
  for ( ContainerPreemptEvent event : capturedEvents.subList(20,30)) {
    assertEquals(appC,event.getAppId());
    assertEquals(KILL_CONTAINER,event.getType());
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier PublicFieldVerifier
/**
 * End-to-end exercise of FileSystemRMStateStore against a single-datanode
 * MiniDFS cluster; also checks that a stray ".tmp" attempt file left behind
 * by an interrupted write is removed during normal store operation.
 */
@Test(timeout=60000) public void testFSRMStateStore() throws Exception {
HdfsConfiguration conf=new HdfsConfiguration();
MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
fsTester=new TestFSRMStateStoreTester(cluster);
FSDataOutputStream fsOut=null;
FileSystemRMStateStore fileSystemRMStateStore=(FileSystemRMStateStore)fsTester.getRMStateStore();
String appAttemptIdStr3="appattempt_1352994193343_0001_000003";
ApplicationAttemptId attemptId3=ConverterUtils.toApplicationAttemptId(appAttemptIdStr3);
// Plant a leftover temporary attempt file, as an interrupted write would.
Path appDir=fsTester.store.getAppDir(attemptId3.getApplicationId().toString());
Path tempAppAttemptFile=new Path(appDir,attemptId3.toString() + ".tmp");
fsOut=fileSystemRMStateStore.fs.create(tempAppAttemptFile,false);
fsOut.write("Some random data ".getBytes());
fsOut.close();
testRMAppStateStore(fsTester);
// The stale .tmp file must have been cleaned up by the store.
Assert.assertFalse(fsTester.workingDirPathURI.getFileSystem(conf).exists(tempAppAttemptFile));
// Run the remaining shared state-store conformance checks on this store.
testRMDTSecretManagerStateStore(fsTester);
testCheckVersion(fsTester);
testEpoch(fsTester);
testAppDeletion(fsTester);
testDeleteStore(fsTester);
testAMRMTokenSecretManagerStateStore(fsTester);
}
finally {
cluster.shutdown();
}
}
InternalCallVerifier EqualityVerifier
/**
 * Starts two HA resource managers backed by the same ZooKeeper-based store,
 * transitions both to active, and verifies the first RM ends up fenced back
 * to standby while the second stays active.
 */
@SuppressWarnings("unchecked") @Test public void testFencing() throws Exception {
  StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER);
  Configuration rm1Conf=createHARMConf("rm1,rm2","rm1",1234);
  rm1Conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  ResourceManager rm1=new ResourceManager();
  rm1.init(rm1Conf);
  rm1.start();
  rm1.getRMContext().getRMAdminService().transitionToActive(requestInfo);
  assertEquals("RM with ZKStore didn't start",Service.STATE.STARTED,rm1.getServiceState());
  assertEquals("RM should be Active",HAServiceProtocol.HAServiceState.ACTIVE,rm1.getRMContext().getRMAdminService().getServiceStatus().getState());
  Configuration rm2Conf=createHARMConf("rm1,rm2","rm2",5678);
  rm2Conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false);
  ResourceManager rm2=new ResourceManager();
  rm2.init(rm2Conf);
  rm2.start();
  // Activating the second RM should fence the first one.
  rm2.getRMContext().getRMAdminService().transitionToActive(requestInfo);
  assertEquals("RM with ZKStore didn't start",Service.STATE.STARTED,rm2.getServiceState());
  assertEquals("RM should be Active",HAServiceProtocol.HAServiceState.ACTIVE,rm2.getRMContext().getRMAdminService().getServiceStatus().getState());
  // Wait (bounded by the ZK timeout) for rm1 to notice it has been fenced.
  for (int attempt=0; attempt < ZK_TIMEOUT_MS / 50; attempt++) {
    if (HAServiceProtocol.HAServiceState.ACTIVE == rm1.getRMContext().getRMAdminService().getServiceStatus().getState()) {
      Thread.sleep(100);
    }
  }
  assertEquals("RM should have been fenced",HAServiceProtocol.HAServiceState.STANDBY,rm1.getRMContext().getRMAdminService().getServiceStatus().getState());
  assertEquals("RM should be Active",HAServiceProtocol.HAServiceState.ACTIVE,rm2.getRMContext().getRMAdminService().getServiceStatus().getState());
}
InternalCallVerifier EqualityVerifier
/**
 * Verifies the default ZK retry interval, and that enabling RM HA switches to
 * an interval derived from the ZK session timeout divided by the retry count.
 */
@Test public void testZKRetryInterval() throws Exception {
  TestZKClient clientTester=new TestZKClient();
  YarnConfiguration yarnConf=new YarnConfiguration();
  ZKRMStateStore stateStore=(ZKRMStateStore)clientTester.getRMStateStore(yarnConf);
  assertEquals(YarnConfiguration.DEFAULT_RM_ZK_RETRY_INTERVAL_MS,stateStore.zkRetryInterval);
  stateStore.stop();
  // With HA enabled the retry interval is timeout / number-of-retries.
  yarnConf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true);
  stateStore=(ZKRMStateStore)clientTester.getRMStateStore(yarnConf);
  assertEquals(YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS / YarnConfiguration.DEFAULT_ZK_RM_NUM_RETRIES,stateStore.zkRetryInterval);
  stateStore.stop();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Drives a ZooKeeper session expiry and verifies that ZKRMStateStore
 * transparently re-establishes the session so subsequent reads still
 * succeed and return the last written value.
 */
@Test(timeout=20000) public void testZKSessionTimeout() throws Exception {
TestZKClient zkClientTester=new TestZKClient();
String path="/test";
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS);
ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher=new TestDispatcher();
store.setRMDispatcher(dispatcher);
// Arm the tester so the ZK session gets expired (presumably on a later op) — see TestZKClient.
zkClientTester.forExpire=true;
store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT);
store.getDataWithRetries(path,true);
store.setDataWithRetries(path,"bytes".getBytes(),0);
// Rendezvous with the tester's expiry/reconnect handling before reading back.
zkClientTester.syncBarrier.await();
try {
// The read must succeed on the re-created session and return the last write.
byte[] ret=store.getDataWithRetries(path,false);
assertEquals("bytes",new String(ret));
}
catch ( Exception e) {
String error="New session creation failed";
LOG.error(error,e);
fail(error);
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Stops the ZK server mid-session: reads must time out while disconnected,
 * then succeed again (returning the last written value) once the server
 * restarts and the client reconnects.
 */
@Test(timeout=20000) public void testZKClientDisconnectAndReconnect() throws Exception {
TestZKClient zkClientTester=new TestZKClient();
String path="/test";
YarnConfiguration conf=new YarnConfiguration();
conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS);
ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf);
TestDispatcher dispatcher=new TestDispatcher();
store.setRMDispatcher(dispatcher);
store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT);
store.getDataWithRetries(path,true);
store.setDataWithRetries(path,"newBytes".getBytes(),0);
// Take the ZK server down and wait until the client observes the disconnect.
stopServer();
zkClientTester.watcher.waitForDisconnected(ZK_OP_WAIT_TIME);
try {
// With the server gone, the retried read must eventually give up.
store.getDataWithRetries(path,true);
fail("Expected ZKClient time out exception");
}
catch ( Exception e) {
assertTrue(e.getMessage().contains("Wait for ZKClient creation timed out"));
}
// Bring the server back; the store should reconnect and serve the earlier write.
startServer();
zkClientTester.watcher.waitForConnected(ZK_OP_WAIT_TIME);
byte[] ret=null;
try {
ret=store.getDataWithRetries(path,true);
}
catch ( Exception e) {
String error="ZKRMStateStore Session restore failed";
LOG.error(error,e);
fail(error);
}
assertEquals("newBytes",new String(ret));
}
InternalCallVerifier EqualityVerifier
/**
 * Exercises ResourceWeights: the zero default, the uniform single-value
 * constructor, the two-argument constructor, and per-resource setters.
 */
@Test(timeout=3000) public void testWeights(){
  // Default constructor: both weights start at zero.
  ResourceWeights zeroWeights = new ResourceWeights();
  Assert.assertEquals("Default CPU weight should be 0.0f.", 0.0f, zeroWeights.getWeight(ResourceType.CPU), 0.00001f);
  Assert.assertEquals("Default memory weight should be 0.0f", 0.0f, zeroWeights.getWeight(ResourceType.MEMORY), 0.00001f);
  // Single-argument constructor applies the same weight to every resource.
  ResourceWeights uniformWeights = new ResourceWeights(2.0f);
  Assert.assertEquals("The CPU weight should be 2.0f.", 2.0f, uniformWeights.getWeight(ResourceType.CPU), 0.00001f);
  Assert.assertEquals("The memory weight should be 2.0f", 2.0f, uniformWeights.getWeight(ResourceType.MEMORY), 0.00001f);
  // Two-argument constructor: per the assertions, CPU gets the second value.
  ResourceWeights mixedWeights = new ResourceWeights(1.5f, 2.0f);
  Assert.assertEquals("The CPU weight should be 2.0f", 2.0f, mixedWeights.getWeight(ResourceType.CPU), 0.00001f);
  Assert.assertEquals("The memory weight should be 1.5f", 1.5f, mixedWeights.getWeight(ResourceType.MEMORY), 0.00001f);
  // Setters override individual resources independently.
  mixedWeights.setWeight(ResourceType.CPU, 2.5f);
  Assert.assertEquals("The CPU weight should be set to 2.5f.", 2.5f, mixedWeights.getWeight(ResourceType.CPU), 0.00001f);
  mixedWeights.setWeight(ResourceType.MEMORY, 4.0f);
  Assert.assertEquals("The memory weight should be set to 4.0f.", 4.0f, mixedWeights.getWeight(ResourceType.MEMORY), 0.00001f);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Registers a node manager and verifies heartbeat response-id tracking:
 * the id increments per accepted heartbeat, a repeated id is tolerated
 * without advancing, and a far-behind id triggers a RESYNC order with an
 * explanatory diagnostic.
 */
@Test public void testRPCResponseId() throws IOException, YarnException {
  String node="localhost";
  Resource capability=BuilderUtils.newResource(1024,1);
  nodeId=NodeId.newInstance(node,1234);
  // NOTE(review): the original built an extra RegisterNodeManagerRequest that was
  // never registered or otherwise used; removed as dead code.
  RegisterNodeManagerRequest request1=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
  request1.setNodeId(nodeId);
  request1.setHttpPort(0);
  request1.setResource(capability);
  resourceTrackerService.registerNodeManager(request1);
  org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus=recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class);
  nodeStatus.setNodeId(nodeId);
  NodeHealthStatus nodeHealthStatus=recordFactory.newRecordInstance(NodeHealthStatus.class);
  nodeHealthStatus.setIsNodeHealthy(true);
  nodeStatus.setNodeHealthStatus(nodeHealthStatus);
  NodeHeartbeatRequest nodeHeartBeatRequest=recordFactory.newRecordInstance(NodeHeartbeatRequest.class);
  nodeHeartBeatRequest.setNodeStatus(nodeStatus);
  // First heartbeat (id 0) is accepted: the RM answers with id 1.
  nodeStatus.setResponseId(0);
  NodeHeartbeatResponse response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(1,response.getResponseId());
  // Echoing the RM's id advances it again.
  nodeStatus.setResponseId(response.getResponseId());
  response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(2,response.getResponseId());
  // Re-sending the now stale-by-one id is treated as a duplicate: id stays at 2.
  response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(2,response.getResponseId());
  // An id far behind the RM's forces a RESYNC order.
  nodeStatus.setResponseId(0);
  response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest);
  Assert.assertEquals(NodeAction.RESYNC,response.getNodeAction());
  Assert.assertEquals("Too far behind rm response id:2 nm response id:0",response.getDiagnosticsMessage());
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Fails the running app's attempts repeatedly: each failure below
 * maxAppAttempts spawns a fresh attempt (walked back through ACCEPTED to
 * RUNNING); the final failure drives the app to FAILED, after which a
 * KILL event must not change the outcome.
 */
@Test public void testAppRunningFailed() throws IOException {
LOG.info("--- START: testAppRunningFailed ---");
RMApp application=testCreateAppRunning(null);
RMAppAttempt appAttempt=application.getCurrentAppAttempt();
int expectedAttemptId=1;
Assert.assertEquals(expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId());
Assert.assertTrue(maxAppAttempts > 1);
for (int i=1; i < maxAppAttempts; i++) {
// Fail the current attempt; the app retries with the next attempt id.
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.ACCEPTED,application);
appAttempt=application.getCurrentAppAttempt();
Assert.assertEquals(++expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId());
// Walk the new attempt back up to RUNNING.
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_ACCEPTED);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.ACCEPTED,application);
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_REGISTERED);
application.handle(event);
rmDispatcher.await();
assertAppState(RMAppState.RUNNING,application);
}
// The attempt budget is exhausted: this failure is terminal.
RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
application.handle(event);
rmDispatcher.await();
sendAppUpdateSavedEvent(application);
assertFailed(application,".*Failing the application.*");
assertAppFinalStateSaved(application);
// KILL after FAILED must be a no-op.
event=new RMAppEvent(application.getApplicationId(),RMAppEventType.KILL);
application.handle(event);
rmDispatcher.await();
assertFailed(application,".*Failing the application.*");
assertAppFinalStateSaved(application);
verifyApplicationFinished(RMAppState.FAILED);
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Application reports for a NEW app always carry a non-null resource usage
 * report; with a null caller it is the shared dummy instance.
 */
@Test public void testGetAppReport(){
  RMApp newApp = createNewTestApp(null);
  assertAppState(RMAppState.NEW, newApp);
  // Null caller: the usage report falls back to the shared dummy instance.
  ApplicationReport anonymousReport = newApp.createAndGetApplicationReport(null, true);
  Assert.assertNotNull(anonymousReport.getApplicationResourceUsageReport());
  Assert.assertEquals(anonymousReport.getApplicationResourceUsageReport(), RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT);
  // A named client also gets a non-null usage report.
  ApplicationReport clientReport = newApp.createAndGetApplicationReport("clientuser", true);
  Assert.assertNotNull(clientReport.getApplicationResourceUsageReport());
}
InternalCallVerifier BooleanVerifier
/**
 * FINAL_SAVING -> FINISHED: after the finished-attempt event the app stays
 * in FINAL_SAVING until APP_UPDATE_SAVED completes the transition; the
 * attempt diagnostics must be carried into the finished app.
 */
@Test public void testAppFinalSavingToFinished() throws IOException {
  LOG.info("--- START: testAppFinalSavingToFinished ---");
  RMApp application=testCreateAppFinalSaving(null);
  final String diagMsg="some diagnostics";
  // Attempt finished while the app is in FINAL_SAVING; the app must wait for the save.
  RMAppEvent event=new RMAppFinishedAttemptEvent(application.getApplicationId(),diagMsg);
  application.handle(event);
  assertAppState(RMAppState.FINAL_SAVING,application);
  RMAppEvent appUpdated=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_UPDATE_SAVED);
  application.handle(appUpdated);
  assertAppState(RMAppState.FINISHED,application);
  assertTimesAtFinish(application);
  // NOTE(review): final status here is FAILED — presumably because the AM never
  // unregistered before finishing; confirm against RMAppImpl.
  assertFinalAppStatus(FinalApplicationStatus.FAILED,application);
  // contains() replaces the indexOf(...) != -1 idiom (getDiagnostics() is a StringBuilder).
  Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().toString().contains(diagMsg));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Unmanaged AM paths: success (FINISHED with diagnostics preserved) and
 * failure — a failed unmanaged attempt is not retried, so the app fails
 * while still on its first attempt.
 */
@Test public void testUnmanagedApp() throws IOException {
  ApplicationSubmissionContext subContext=new ApplicationSubmissionContextPBImpl();
  subContext.setUnmanagedAM(true);
  LOG.info("--- START: testUnmanagedAppSuccessPath ---");
  final String diagMsg="some diagnostics";
  RMApp application=testCreateAppFinished(subContext,diagMsg);
  // contains() replaces the indexOf(...) != -1 idiom on the diagnostics StringBuilder.
  Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().toString().contains(diagMsg));
  reset(writer);
  LOG.info("--- START: testUnmanagedAppFailPath ---");
  application=testCreateAppRunning(subContext);
  RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false);
  application.handle(event);
  rmDispatcher.await();
  RMAppAttempt appAttempt=application.getCurrentAppAttempt();
  // Unmanaged AMs get no retry: still the first attempt after the failure.
  Assert.assertEquals(1,appAttempt.getAppAttemptId().getAttemptId());
  sendAppUpdateSavedEvent(application);
  assertFailed(application,".*Unmanaged application.*Failing the application.*");
  assertAppFinalStateSaved(application);
}
InternalCallVerifier EqualityVerifier
/**
 * KILL delivered after FINISHED is ignored: state, finish times, and the
 * (empty) diagnostics are all unchanged.
 */
@Test public void testAppFinishedFinished() throws IOException {
LOG.info("--- START: testAppFinishedFinished ---");
RMApp application=testCreateAppFinished(null,"");
RMAppEvent event=new RMAppEvent(application.getApplicationId(),RMAppEventType.KILL);
application.handle(event);
rmDispatcher.await();
assertTimesAtFinish(application);
assertAppState(RMAppState.FINISHED,application);
StringBuilder diag=application.getDiagnostics();
Assert.assertEquals("application diagnostics is not correct","",diag.toString());
verifyApplicationFinished(RMAppState.FINISHED);
}
InternalCallVerifier EqualityVerifier
/** An unmanaged AM registering before LAUNCHED is rejected and the attempt fails. */
@Test public void testUnmanagedAMUnexpectedRegistration(){
  unmanagedAM = true;
  when(submissionContext.getUnmanagedAM()).thenReturn(true);
  submitApplicationAttempt();
  assertEquals(RMAppAttemptState.SUBMITTED, applicationAttempt.getAppAttemptState());
  // Register while still SUBMITTED -- premature for an unmanaged AM.
  RMAppAttemptRegistrationEvent prematureRegistration = new RMAppAttemptRegistrationEvent(applicationAttempt.getAppAttemptId(), "host", 8042, "oldtrackingurl");
  applicationAttempt.handle(prematureRegistration);
  assertEquals(YarnApplicationAttemptState.SUBMITTED, applicationAttempt.createApplicationAttemptState());
  testAppAttemptSubmittedToFailedState("Unmanaged AM must register after AM attempt reaches LAUNCHED state.");
}
InternalCallVerifier EqualityVerifier
/** KILL received while SUBMITTED ends the attempt in KILLED with empty diagnostics. */
@Test public void testSubmittedToKilled(){
  submitApplicationAttempt();
  RMAppAttemptEvent killEvent = new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL);
  applicationAttempt.handle(killEvent);
  // The recoverable state is still SUBMITTED at this point.
  assertEquals(YarnApplicationAttemptState.SUBMITTED, applicationAttempt.createApplicationAttemptState());
  testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * AM container completes abnormally while RUNNING: the attempt enters
 * FINAL_SAVING, ignores further container-finished/EXPIRE events, and
 * lands in FAILED once the state-store save is acknowledged. Tracking
 * URLs then fall back to the RM app page.
 */
@Test public void testRunningToFailed(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
String containerDiagMsg="some error";
int exitCode=123;
ContainerStatus cs=BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,containerDiagMsg,exitCode);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
// Events arriving during FINAL_SAVING must not move the state.
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0)));
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
assertEquals(amContainer,applicationAttempt.getMasterContainer());
assertEquals(0,application.getRanNodes().size());
// A failed attempt's tracking URLs point back at the RM's app page.
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
InternalCallVerifier AssumptionSetter NullVerifier EqualityVerifier HybridVerifier
/**
 * Client tokens (security enabled): none before launch or for a null
 * client, issued only to a named client on a launched attempt, and no
 * longer issued once the attempt has been killed.
 */
@Test public void testGetClientToken() throws Exception {
assumeTrue(isSecurityEnabled);
Container amContainer=allocateApplicationAttempt();
// Before launch no token is issued, regardless of client identity.
Token token=applicationAttempt.createClientToken(null);
Assert.assertNull(token);
token=applicationAttempt.createClientToken("clientuser");
Assert.assertNull(token);
launchApplicationAttempt(amContainer);
// After launch: still none for a null client, but a named client gets one.
token=applicationAttempt.createClientToken(null);
Assert.assertNull(token);
token=applicationAttempt.createClientToken("clientuser");
Assert.assertNotNull(token);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL));
assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
// A killed attempt must not issue tokens any more.
token=applicationAttempt.createClientToken(null);
Assert.assertNull(token);
token=applicationAttempt.createClientToken("clientuser");
Assert.assertNull(token);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With keep-containers enabled but maxAppAttempts == 1, the failing AM is
 * the last attempt, so container state must NOT be transferred forward.
 */
@Test public void testContainersCleanupForLastAttempt(){
// Rebuild the attempt marked as the last attempt (final constructor flag).
applicationAttempt=new RMAppAttemptImpl(applicationAttempt.getAppAttemptId(),rmContext,scheduler,masterService,submissionContext,new Configuration(),true);
when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
when(submissionContext.getMaxAppAttempts()).thenReturn(1);
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
// Last attempt: nothing is carried over to a successor.
assertFalse(transferStateFromPreviousAttempt);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
InternalCallVerifier EqualityVerifier
/** AM container reported lost while SCHEDULED drives the attempt to FAILED. */
@Test public void testAMCrashAtScheduled(){
  scheduleApplicationAttempt();
  ApplicationAttemptId attemptId = applicationAttempt.getAppAttemptId();
  ContainerStatus lostStatus = SchedulerUtils.createAbnormalContainerStatus(BuilderUtils.newContainerId(attemptId, 1), SchedulerUtils.LOST_CONTAINER);
  applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(attemptId, lostStatus));
  // The recoverable state is still SCHEDULED until the final-state save completes.
  assertEquals(YarnApplicationAttemptState.SCHEDULED, applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  assertEquals(RMAppAttemptState.FAILED, applicationAttempt.getAppAttemptState());
  verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * KILL while RUNNING: the attempt saves its final state (ignoring interim
 * container-finished/EXPIRE events), lands in KILLED with tokens cleaned
 * up, and its tracking URLs point at the RM app page.
 */
@Test public void testRunningToKilled(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
// Events arriving during FINAL_SAVING must not move the state.
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0)));
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.KILLED,applicationAttempt.getAppAttemptState());
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
assertEquals(amContainer,applicationAttempt.getMasterContainer());
assertEquals(0,application.getRanNodes().size());
// A killed attempt's tracking URLs point back at the RM's app page.
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.KILLED);
}
InternalCallVerifier EqualityVerifier
/** KILL received while SCHEDULED ends the attempt in KILLED with empty diagnostics. */
@Test public void testScheduledToKilled(){
  scheduleApplicationAttempt();
  RMAppAttemptEvent killEvent = new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL);
  applicationAttempt.handle(killEvent);
  // The recoverable state is still SCHEDULED at this point.
  assertEquals(YarnApplicationAttemptState.SCHEDULED, applicationAttempt.createApplicationAttemptState());
  testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
}
InternalCallVerifier EqualityVerifier
/** KILL received while ALLOCATED ends the attempt in KILLED, keeping the AM container. */
@Test public void testAllocatedToKilled(){
  Container masterContainer = allocateApplicationAttempt();
  RMAppAttemptEvent killEvent = new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL);
  applicationAttempt.handle(killEvent);
  // The recoverable state is still ALLOCATED at this point.
  assertEquals(YarnApplicationAttemptState.ALLOCATED, applicationAttempt.createApplicationAttemptState());
  testAppAttemptKilledState(masterContainer, EMPTY_DIAGNOSTICS);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * With keep-containers enabled and attempts remaining, an AM failure
 * transfers container state forward; containers finishing AFTER the
 * attempt has failed are still recorded in justFinishedContainers.
 */
@Test public void testFailedToFailed(){
when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123);
ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId();
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
// Not the last attempt: state transfers to the next one.
assertTrue(transferStateFromPreviousAttempt);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
// A container completing after failure is still tracked by the failed attempt.
ContainerStatus cs2=ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId,2),ContainerState.COMPLETE,"",0);
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs2));
assertEquals(1,applicationAttempt.getJustFinishedContainers().size());
assertEquals(cs2.getContainerId(),applicationAttempt.getJustFinishedContainers().get(0).getContainerId());
}
InternalCallVerifier EqualityVerifier
/**
 * The AM unregisters (success) and the attempt enters FINAL_SAVING; an
 * EXPIRE arriving before the save completes is ignored, and the attempt
 * still finishes with the unregistration's status/URL/diagnostics.
 */
@Test public void testFinalSavingToFinishedWithExpire(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
FinalApplicationStatus finalStatus=FinalApplicationStatus.SUCCEEDED;
String trackingUrl="mytrackingurl";
String diagnostics="Successssseeeful";
applicationAttempt.handle(new RMAppAttemptUnregistrationEvent(applicationAttempt.getAppAttemptId(),trackingUrl,finalStatus,diagnostics));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
// EXPIRE during FINAL_SAVING must not derail the finish.
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
testAppAttemptFinishedState(amContainer,finalStatus,trackingUrl,diagnostics,0,false);
}
InternalCallVerifier EqualityVerifier
/** A launch failure while ALLOCATED moves the attempt to FAILED with the launch diagnostics. */
@Test public void testAllocatedToFailed(){
  Container masterContainer = allocateApplicationAttempt();
  String launchFailureMsg = "Launch Failed";
  applicationAttempt.handle(new RMAppAttemptLaunchFailedEvent(applicationAttempt.getAppAttemptId(), launchFailureMsg));
  // The recoverable state is still ALLOCATED at this point.
  assertEquals(YarnApplicationAttemptState.ALLOCATED, applicationAttempt.createApplicationAttemptState());
  testAppAttemptFailedState(masterContainer, launchFailureMsg);
}
InternalCallVerifier EqualityVerifier
/** KILL in NEW: the attempt goes straight to KILLED and its tokens are cleaned up. */
@Test public void testNewToKilled(){
  RMAppAttemptEvent killEvent = new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL);
  applicationAttempt.handle(killEvent);
  // The recoverable state is still NEW at this point.
  assertEquals(YarnApplicationAttemptState.NEW, applicationAttempt.createApplicationAttemptState());
  testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
  verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * An attempt that never registers times out: EXPIRE in LAUNCHED leads to
 * FAILED with "timed out" diagnostics, cleaned-up tokens, and tracking
 * URLs pointing at the RM app page.
 */
@Test(timeout=10000) public void testLaunchedExpire(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out"));
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
InternalCallVerifier EqualityVerifier
/**
 * AM container completes abnormally while ALLOCATED (before launch): the
 * attempt fails, tokens are cleaned up, and the diagnostics carry the
 * container exit code.
 */
@Test public void testAMCrashAtAllocated(){
Container amContainer=allocateApplicationAttempt();
String containerDiagMsg="some error";
int exitCode=123;
ContainerStatus cs=BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,containerDiagMsg,exitCode);
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),cs));
assertEquals(YarnApplicationAttemptState.ALLOCATED,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
// Only check the URL content of the diagnostics if a tracking URL was set.
boolean shouldCheckURL=(applicationAttempt.getTrackingUrl() != null);
verifyAMCrashAtAllocatedDiagnosticInfo(applicationAttempt.getDiagnostics(),exitCode,shouldCheckURL);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * A registered, running attempt that stops heartbeating expires: EXPIRE in
 * RUNNING leads to FAILED with "timed out" diagnostics, invalidated AM
 * host/port, and RM-app-page tracking URLs.
 */
@Test(timeout=20000) public void testRunningExpire(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE));
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState());
assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out"));
String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId());
assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl());
assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl());
verifyTokenCount(applicationAttempt.getAppAttemptId(),1);
verifyAMHostAndPortInvalidated();
verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
}
InternalCallVerifier EqualityVerifier
/**
 * The AM unregisters (success) and the attempt enters FINAL_SAVING; a
 * container-finished event arriving before the save completes is ignored,
 * and the attempt still finishes with the unregistration's details.
 */
@Test public void testFinalSavingToFinishedWithContainerFinished(){
Container amContainer=allocateApplicationAttempt();
launchApplicationAttempt(amContainer);
runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false);
FinalApplicationStatus finalStatus=FinalApplicationStatus.SUCCEEDED;
String trackingUrl="mytrackingurl";
String diagnostics="Successful";
applicationAttempt.handle(new RMAppAttemptUnregistrationEvent(applicationAttempt.getAppAttemptId(),trackingUrl,finalStatus,diagnostics));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState());
// AM container completion during FINAL_SAVING must not derail the finish.
applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0)));
assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState());
sendAttemptUpdateSavedEvent(applicationAttempt);
testAppAttemptFinishedState(amContainer,finalStatus,trackingUrl,diagnostics,0,false);
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Unmanaged AM registering straight from SUBMITTED: even with
 * keep-containers enabled, no container state transfers from this attempt.
 */
@Test public void testUnmanagedAMContainersCleanup(){
  unmanagedAM = true;
  when(submissionContext.getUnmanagedAM()).thenReturn(true);
  when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true);
  submitApplicationAttempt();
  // Register while still SUBMITTED (no LAUNCHED step for an unmanaged AM here).
  RMAppAttemptRegistrationEvent registration = new RMAppAttemptRegistrationEvent(applicationAttempt.getAppAttemptId(), "host", 8042, "oldtrackingurl");
  applicationAttempt.handle(registration);
  assertEquals(YarnApplicationAttemptState.SUBMITTED, applicationAttempt.createApplicationAttemptState());
  sendAttemptUpdateSavedEvent(applicationAttempt);
  // Keep-containers notwithstanding, nothing is carried over.
  assertFalse(transferStateFromPreviousAttempt);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Walks an RMContainer NEW -> ALLOCATED -> ACQUIRED -> RUNNING, then
 * releases it: the container ends RELEASED/ABORTED, history is written,
 * the app attempt is notified via CONTAINER_FINISHED, and a late FINISHED
 * event leaves the terminal state untouched.
 */
@Test public void testReleaseWhileRunning(){
  DrainDispatcher drainDispatcher=new DrainDispatcher();
  EventHandler appAttemptEventHandler=mock(EventHandler.class);
  EventHandler generic=mock(EventHandler.class);
  drainDispatcher.register(RMAppAttemptEventType.class,appAttemptEventHandler);
  drainDispatcher.register(RMNodeEventType.class,generic);
  drainDispatcher.init(new YarnConfiguration());
  drainDispatcher.start();
  NodeId nodeId=BuilderUtils.newNodeId("host",3425);
  ApplicationId appId=BuilderUtils.newApplicationId(1,1);
  ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
  ContainerId containerId=BuilderUtils.newContainerId(appAttemptId,1);
  ContainerAllocationExpirer expirer=mock(ContainerAllocationExpirer.class);
  Resource resource=BuilderUtils.newResource(512,1);
  Priority priority=BuilderUtils.newPriority(5);
  Container container=BuilderUtils.newContainer(containerId,nodeId,"host:3465",resource,priority,null);
  RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
  RMContext rmContext=mock(RMContext.class);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  RMContainer rmContainer=new RMContainerImpl(container,appAttemptId,nodeId,"user",rmContext);
  assertEquals(RMContainerState.NEW,rmContainer.getState());
  assertEquals(resource,rmContainer.getAllocatedResource());
  assertEquals(nodeId,rmContainer.getAllocatedNode());
  assertEquals(priority,rmContainer.getAllocatedPriority());
  verify(writer).containerStarted(any(RMContainer.class));
  // Drive the container up to RUNNING.
  rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.START));
  drainDispatcher.await();
  assertEquals(RMContainerState.ALLOCATED,rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.ACQUIRED));
  drainDispatcher.await();
  assertEquals(RMContainerState.ACQUIRED,rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.LAUNCHED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING,rmContainer.getState());
  assertEquals("//host:3465/node/containerlogs/container_1_0001_01_000001/user",rmContainer.getLogURL());
  reset(appAttemptEventHandler);
  // Release the running container.
  ContainerStatus containerStatus=SchedulerUtils.createAbnormalContainerStatus(containerId,SchedulerUtils.RELEASED_CONTAINER);
  rmContainer.handle(new RMContainerFinishedEvent(containerId,containerStatus,RMContainerEventType.RELEASED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RELEASED,rmContainer.getState());
  assertEquals(SchedulerUtils.RELEASED_CONTAINER,rmContainer.getDiagnosticsInfo());
  assertEquals(ContainerExitStatus.ABORTED,rmContainer.getContainerExitStatus());
  assertEquals(ContainerState.COMPLETE,rmContainer.getContainerState());
  verify(writer).containerFinished(any(RMContainer.class));
  // FIX: parameterize the captor — a raw ArgumentCaptor's getValue() returns Object,
  // which does not assign to RMAppAttemptContainerFinishedEvent without a cast.
  ArgumentCaptor<RMAppAttemptContainerFinishedEvent> captor=ArgumentCaptor.forClass(RMAppAttemptContainerFinishedEvent.class);
  verify(appAttemptEventHandler).handle(captor.capture());
  RMAppAttemptContainerFinishedEvent cfEvent=captor.getValue();
  assertEquals(appAttemptId,cfEvent.getApplicationAttemptId());
  assertEquals(containerStatus,cfEvent.getContainerStatus());
  assertEquals(RMAppAttemptEventType.CONTAINER_FINISHED,cfEvent.getType());
  // A FINISHED event after release must not change the terminal state.
  rmContainer.handle(new RMContainerFinishedEvent(containerId,SchedulerUtils.createAbnormalContainerStatus(containerId,"FinishedContainer"),RMContainerEventType.FINISHED));
  assertEquals(RMContainerState.RELEASED,rmContainer.getState());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Walks an RMContainer NEW -> ALLOCATED -> ACQUIRED -> RUNNING, then
 * delivers an EXPIRE: expiry is ignored for a RUNNING container — the
 * state does not change and no containerFinished is recorded.
 */
@Test public void testExpireWhileRunning(){
DrainDispatcher drainDispatcher=new DrainDispatcher();
EventHandler appAttemptEventHandler=mock(EventHandler.class);
EventHandler generic=mock(EventHandler.class);
drainDispatcher.register(RMAppAttemptEventType.class,appAttemptEventHandler);
drainDispatcher.register(RMNodeEventType.class,generic);
drainDispatcher.init(new YarnConfiguration());
drainDispatcher.start();
NodeId nodeId=BuilderUtils.newNodeId("host",3425);
ApplicationId appId=BuilderUtils.newApplicationId(1,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
ContainerId containerId=BuilderUtils.newContainerId(appAttemptId,1);
ContainerAllocationExpirer expirer=mock(ContainerAllocationExpirer.class);
Resource resource=BuilderUtils.newResource(512,1);
Priority priority=BuilderUtils.newPriority(5);
Container container=BuilderUtils.newContainer(containerId,nodeId,"host:3465",resource,priority,null);
RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
RMContext rmContext=mock(RMContext.class);
when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
RMContainer rmContainer=new RMContainerImpl(container,appAttemptId,nodeId,"user",rmContext);
assertEquals(RMContainerState.NEW,rmContainer.getState());
assertEquals(resource,rmContainer.getAllocatedResource());
assertEquals(nodeId,rmContainer.getAllocatedNode());
assertEquals(priority,rmContainer.getAllocatedPriority());
verify(writer).containerStarted(any(RMContainer.class));
// Drive the container up to RUNNING.
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.START));
drainDispatcher.await();
assertEquals(RMContainerState.ALLOCATED,rmContainer.getState());
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.ACQUIRED));
drainDispatcher.await();
assertEquals(RMContainerState.ACQUIRED,rmContainer.getState());
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.LAUNCHED));
drainDispatcher.await();
assertEquals(RMContainerState.RUNNING,rmContainer.getState());
assertEquals("//host:3465/node/containerlogs/container_1_0001_01_000001/user",rmContainer.getLogURL());
reset(appAttemptEventHandler);
// EXPIRE against a RUNNING container must be a no-op.
ContainerStatus containerStatus=SchedulerUtils.createAbnormalContainerStatus(containerId,SchedulerUtils.EXPIRED_CONTAINER);
rmContainer.handle(new RMContainerFinishedEvent(containerId,containerStatus,RMContainerEventType.EXPIRE));
drainDispatcher.await();
assertEquals(RMContainerState.RUNNING,rmContainer.getState());
verify(writer,never()).containerFinished(any(RMContainer.class));
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Verifies that an RMContainer exposes its originating ResourceRequests while
 * it is ALLOCATED, and that those requests are cleared once the AM has
 * acquired the container.
 */
@Test public void testExistenceOfResourceRequestInRMContainer() throws Exception {
Configuration conf=new Configuration();
MockRM rm1=new MockRM(conf);
rm1.start();
MockNM nm1=rm1.registerNode("unknownhost:1234",8000);
RMApp app1=rm1.submitApp(1024);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
ResourceScheduler scheduler=rm1.getResourceScheduler();
// Ask for one 1024MB container; no containers are released.
am1.allocate("127.0.0.1",1024,1,new ArrayList());
// Container id 2: id 1 is the AM's own container.
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED);
// While ALLOCATED the scheduler still holds the requests for this container.
Assert.assertNotNull(scheduler.getRMContainer(containerId2).getResourceRequests());
// An empty allocate heartbeat pulls the allocated container to the AM (-> ACQUIRED).
am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
rm1.waitForState(nm1,containerId2,RMContainerState.ACQUIRED);
// Once acquired, the stored requests are dropped.
Assert.assertNull(scheduler.getRMContainer(containerId2).getResourceRequests());
}
APIUtilityVerifier InternalCallVerifier NullVerifier
/**
 * Looking up QueueMetrics twice for the same queue in the same metrics
 * system must succeed both times (the second lookup hits the cache rather
 * than failing on a duplicate-source registration).
 */
@Test public void testMetricsCache(){
  MetricsSystem metricsSystem=new MetricsSystemImpl("cache");
  metricsSystem.start();
  try {
    String parentName="root1";
    String leafName="root1.leaf";
    QueueMetrics parentMetrics=QueueMetrics.forQueue(metricsSystem,parentName,null,true,conf);
    Queue parent=make(stub(Queue.class).returning(parentMetrics).from.getMetrics());
    // First lookup creates and registers the leaf queue's metrics.
    QueueMetrics firstLookup=QueueMetrics.forQueue(metricsSystem,leafName,parent,true,conf);
    Assert.assertNotNull("QueueMetrics for A shoudn't be null",firstLookup);
    // Second lookup for the same name must not blow up.
    QueueMetrics secondLookup=QueueMetrics.forQueue(metricsSystem,leafName,parent,true,conf);
    Assert.assertNotNull("QueueMetrics for alterMetrics shoudn't be null",secondLookup);
  }
  finally {
    metricsSystem.shutdown();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Moving an application between sibling queues shifts all of its accounting
 * (apps, allocated resources, reserved resources) from the old queue's
 * metrics to the new queue's, while the shared parent's totals stay put.
 */
@Test public void testMove(){
final String user="user1";
// Two sibling leaf queues under a common parent.
Queue parentQueue=createQueue("parent",null);
Queue oldQueue=createQueue("old",parentQueue);
Queue newQueue=createQueue("new",parentQueue);
QueueMetrics parentMetrics=parentQueue.getMetrics();
QueueMetrics oldMetrics=oldQueue.getMetrics();
QueueMetrics newMetrics=newQueue.getMetrics();
ApplicationAttemptId appAttId=createAppAttemptId(0,0);
RMContext rmContext=mock(RMContext.class);
when(rmContext.getEpoch()).thenReturn(3);
SchedulerApplicationAttempt app=new SchedulerApplicationAttempt(appAttId,user,oldQueue,oldQueue.getActiveUsersManager(),rmContext);
oldMetrics.submitApp(user);
// 0x00c00001 — presumably epoch 3 encoded in the high bits plus container id 1; confirm against getNewContainerId.
assertEquals(app.getNewContainerId(),0x00c00001);
// One live container of 1536MB / 2 vcores...
Resource requestedResource=Resource.newInstance(1536,2);
Priority requestedPriority=Priority.newInstance(2);
ResourceRequest request=ResourceRequest.newInstance(requestedPriority,ResourceRequest.ANY,requestedResource,3);
app.updateResourceRequests(Arrays.asList(request));
RMContainer container1=createRMContainer(appAttId,1,requestedResource);
app.liveContainers.put(container1.getContainerId(),container1);
SchedulerNode node=createNode();
app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH,node,requestedPriority,request,container1.getContainer());
// ...plus one reserved container of 2048MB / 3 vcores at a different priority.
Priority prio1=Priority.newInstance(1);
Resource reservedResource=Resource.newInstance(2048,3);
RMContainer container2=createReservedRMContainer(appAttId,1,reservedResource,node.getNodeID(),prio1);
Map reservations=new HashMap();
reservations.put(node.getNodeID(),container2);
app.reservedContainers.put(prio1,reservations);
oldMetrics.reserveResource(user,reservedResource);
// Before the move: everything is accounted to the old queue (and to the parent).
checkQueueMetrics(oldMetrics,1,1,1536,2,2048,3,3072,4);
checkQueueMetrics(newMetrics,0,0,0,0,0,0,0,0);
checkQueueMetrics(parentMetrics,1,1,1536,2,2048,3,3072,4);
app.move(newQueue);
// After the move: accounting shifted wholesale to the new queue; parent unchanged.
checkQueueMetrics(oldMetrics,0,0,0,0,0,0,0,0);
checkQueueMetrics(newMetrics,1,1,1536,2,2048,3,3072,4);
checkQueueMetrics(parentMetrics,1,1,1536,2,2048,3,3072,4);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * SchedulerUtils.normalizeRequest with the DefaultResourceCalculator:
 * memory asks are rounded up to a multiple of the minimum allocation and
 * clamped into the [minimum, maximum] range.
 */
@Test(timeout=30000) public void testNormalizeRequest(){
  ResourceCalculator calculator=new DefaultResourceCalculator();
  final int minMemory=1024;
  final int maxMemory=8192;
  Resource minAllocation=Resources.createResource(minMemory,0);
  Resource maxAllocation=Resources.createResource(maxMemory,0);
  ResourceRequest request=new ResourceRequestPBImpl();
  // Negative asks are raised to the minimum.
  request.setCapability(Resources.createResource(-1024));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(minMemory,request.getCapability().getMemory());
  // Zero asks are raised to the minimum as well.
  request.setCapability(Resources.createResource(0));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(minMemory,request.getCapability().getMemory());
  // Exact multiples of the minimum pass through unchanged.
  request.setCapability(Resources.createResource(2 * minMemory));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(2 * minMemory,request.getCapability().getMemory());
  // Values in between round up to the next multiple.
  request.setCapability(Resources.createResource(minMemory + 10));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(2 * minMemory,request.getCapability().getMemory());
  // The maximum itself is accepted...
  request.setCapability(Resources.createResource(maxMemory));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(maxMemory,request.getCapability().getMemory());
  // ...and a near-maximum ask rounds up to it.
  request.setCapability(Resources.createResource(maxMemory - 10));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(maxMemory,request.getCapability().getMemory());
  // A maximum that is not itself a multiple of the minimum still caps the ask.
  maxAllocation=Resources.createResource(maxMemory - 10,0);
  request.setCapability(Resources.createResource(maxMemory - 100));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(maxAllocation.getMemory(),request.getCapability().getMemory());
  // Asks above the maximum are clamped down to it.
  maxAllocation=Resources.createResource(maxMemory,0);
  request.setCapability(Resources.createResource(maxMemory + 100));
  SchedulerUtils.normalizeRequest(request,calculator,null,minAllocation,maxAllocation);
  assertEquals(maxAllocation.getMemory(),request.getCapability().getMemory());
}
InternalCallVerifier EqualityVerifier
/**
 * SchedulerUtils.normalizeRequest with the DominantResourceCalculator:
 * both the memory and the vcore dimensions are normalized against the
 * minimum allocation.
 */
@Test(timeout=30000) public void testNormalizeRequestWithDominantResourceCalculator(){
  ResourceCalculator calculator=new DominantResourceCalculator();
  Resource minAllocation=Resources.createResource(1024,1);
  Resource maxAllocation=Resources.createResource(10240,10);
  Resource cluster=Resources.createResource(10 * 1024,10);
  ResourceRequest request=new ResourceRequestPBImpl();
  // Negative asks in both dimensions come back as the minimum allocation.
  request.setCapability(Resources.createResource(-1024,-1));
  SchedulerUtils.normalizeRequest(request,calculator,cluster,minAllocation,maxAllocation);
  assertEquals(minAllocation,request.getCapability());
  // Zero asks as well.
  request.setCapability(Resources.createResource(0,0));
  SchedulerUtils.normalizeRequest(request,calculator,cluster,minAllocation,maxAllocation);
  assertEquals(minAllocation,request.getCapability());
  assertEquals(1,request.getCapability().getVirtualCores());
  assertEquals(1024,request.getCapability().getMemory());
  // Memory rounds up to the next multiple of the minimum; zero vcores are raised to the minimum.
  request.setCapability(Resources.createResource(1536,0));
  SchedulerUtils.normalizeRequest(request,calculator,cluster,minAllocation,maxAllocation);
  assertEquals(Resources.createResource(2048,1),request.getCapability());
  assertEquals(1,request.getCapability().getVirtualCores());
  assertEquals(2048,request.getCapability().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises the CapacityScheduler limit arithmetic for leaf queue A:
 * maximum active applications (queue-wide and per-user), maximum
 * applications, and how those limits react to cluster growth and to
 * per-queue overrides of maximum-am-resource-percent and
 * maximum-applications.
 */
@Test public void testLimitsComputation() throws Exception {
CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
setupQueueConfiguration(csConf);
YarnConfiguration conf=new YarnConfiguration();
// Mocked scheduler context backed by a 100 x 16GB/16-core cluster.
CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class);
when(csContext.getConfiguration()).thenReturn(csConf);
when(csContext.getConf()).thenReturn(conf);
when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1));
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,16));
when(csContext.getApplicationComparator()).thenReturn(CapacityScheduler.applicationComparator);
when(csContext.getQueueComparator()).thenReturn(CapacityScheduler.queueComparator);
when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 16);
when(csContext.getClusterResource()).thenReturn(clusterResource);
Map queues=new HashMap();
CSQueue root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook);
LeafQueue queue=(LeafQueue)queues.get(A);
LOG.info("Queue 'A' -" + " maxActiveApplications=" + queue.getMaximumActiveApplications() + " maxActiveApplicationsPerUser="+ queue.getMaximumActiveApplicationsPerUser());
// Queue-wide limit: ceil(cluster GB * per-queue AM percent * absolute max capacity), floored at 1.
int expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity()));
assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications());
// Per-user limit is derived from the absolute (not absolute-max) capacity, then scaled by user limit/factor.
int expectedMaxActiveAppsUsingAbsCap=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePercent() * queue.getAbsoluteCapacity()));
assertEquals((int)Math.ceil(expectedMaxActiveAppsUsingAbsCap * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),queue.getMaximumActiveApplicationsPerUser());
assertEquals((int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),queue.getMetrics().getAvailableMB());
// Growing the cluster (100 -> 120 nodes) must rescale all of the above.
clusterResource=Resources.createResource(120 * 16 * GB);
root.updateClusterResource(clusterResource);
expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity()));
assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications());
expectedMaxActiveAppsUsingAbsCap=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePercent() * queue.getAbsoluteCapacity()));
assertEquals((int)Math.ceil(expectedMaxActiveAppsUsingAbsCap * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),queue.getMaximumActiveApplicationsPerUser());
assertEquals((int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),queue.getMetrics().getAvailableMB());
// With no per-queue override, maximum-applications is UNDEFINED and the
// system-wide default (scaled by absolute capacity) applies.
assertEquals((int)CapacitySchedulerConfiguration.UNDEFINED,csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
int expectedMaxApps=(int)(CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS * queue.getAbsoluteCapacity());
assertEquals(expectedMaxApps,queue.getMaxApplications());
int expectedMaxAppsPerUser=(int)(expectedMaxApps * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor());
assertEquals(expectedMaxAppsPerUser,queue.getMaxApplicationsPerUser());
// NOTE(review): the (long) casts truncate the float percent values toward zero,
// so this assertion only compares truncated values — confirm that is intended.
assertEquals((long)CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,(long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()));
// Override the AM resource percent for this queue and re-parse the hierarchy.
csConf.setFloat("yarn.scheduler.capacity." + queue.getQueuePath() + ".maximum-am-resource-percent",0.5f);
queues=new HashMap();
root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook);
clusterResource=Resources.createResource(100 * 16 * GB);
queue=(LeafQueue)queues.get(A);
expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity()));
// NOTE(review): same truncating-cast caveat as above ((long)0.5 == 0).
assertEquals((long)0.5,(long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()));
assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications());
// Override maximum-applications directly; the per-user maximum scales from it.
csConf.setInt("yarn.scheduler.capacity." + queue.getQueuePath() + ".maximum-applications",9999);
queues=new HashMap();
root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook);
queue=(LeafQueue)queues.get(A);
assertEquals(9999,(int)csConf.getMaximumApplicationsPerQueue(queue.getQueuePath()));
assertEquals(9999,queue.getMaxApplications());
expectedMaxAppsPerUser=(int)(9999 * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor());
assertEquals(expectedMaxAppsPerUser,queue.getMaxApplicationsPerUser());
}
InternalCallVerifier EqualityVerifier
/**
 * Tracks active vs. pending application counts as attempts are submitted
 * and finished, both queue-wide and per-user, and checks that raising the
 * active-application limit mid-test lets a new user's submission activate
 * immediately.
 */
@Test public void testActiveApplicationLimits() throws Exception {
final String user_0="user_0";
final String user_1="user_1";
int APPLICATION_ID=0;
// The first two submissions activate immediately (the configured limit is evidently 2:
// the third submission below goes pending — set up outside this method, confirm in setup).
FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_0,user_0);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_1,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
// Third submission exceeds the active limit and is queued as pending.
FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_2,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
// Finishing an active app frees a slot, which the pending app fills.
queue.finishApplicationAttempt(app_0,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
// A fourth submission goes pending again.
FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_3,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
// Raise the queue-wide active limit to 3; a new user's submission can now activate
// even though user_0 still has an app pending.
doReturn(3).when(queue).getMaximumActiveApplications();
FiCaSchedulerApp app_4=getMockApplication(APPLICATION_ID++,user_1);
queue.submitApplicationAttempt(app_4,user_1);
assertEquals(3,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertEquals(1,queue.getNumActiveApplications(user_1));
assertEquals(0,queue.getNumPendingApplications(user_1));
// At the new limit, user_1's second submission goes pending.
FiCaSchedulerApp app_5=getMockApplication(APPLICATION_ID++,user_1);
queue.submitApplicationAttempt(app_5,user_1);
assertEquals(3,queue.getNumActiveApplications());
assertEquals(2,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertEquals(1,queue.getNumActiveApplications(user_1));
assertEquals(1,queue.getNumPendingApplications(user_1));
// Finishing user_1's active app activates user_1's pending one.
queue.finishApplicationAttempt(app_4,A);
assertEquals(3,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertEquals(1,queue.getNumActiveApplications(user_1));
assertEquals(0,queue.getNumPendingApplications(user_1));
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Verifies pending/active bookkeeping when applications finish out of
 * submission order (e.g. are killed): finishing a pending app removes it
 * without activating anything, while finishing an active app promotes the
 * oldest remaining pending app.
 */
@Test public void testActiveLimitsWithKilledApps() throws Exception {
final String user_0="user_0";
int APPLICATION_ID=0;
// At most two applications may be active at once.
doReturn(2).when(queue).getMaximumActiveApplications();
// First two submissions activate immediately.
FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_0,user_0);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_0));
FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_1,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_1));
// Third and fourth submissions queue up as pending.
FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_2,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertTrue(queue.pendingApplications.contains(app_2));
FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0);
queue.submitApplicationAttempt(app_3,user_0);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(2,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(2,queue.getNumPendingApplications(user_0));
assertTrue(queue.pendingApplications.contains(app_3));
// Finishing a *pending* app (as if killed) just drops it; nothing is activated.
queue.finishApplicationAttempt(app_2,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(1,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(1,queue.getNumPendingApplications(user_0));
assertFalse(queue.pendingApplications.contains(app_2));
assertFalse(queue.activeApplications.contains(app_2));
// Finishing an *active* app frees a slot: the remaining pending app_3 is activated.
queue.finishApplicationAttempt(app_0,A);
assertEquals(2,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(2,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertTrue(queue.activeApplications.contains(app_3));
assertFalse(queue.pendingApplications.contains(app_3));
assertFalse(queue.activeApplications.contains(app_0));
// Draining the remaining apps empties the queue completely.
queue.finishApplicationAttempt(app_1,A);
assertEquals(1,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(1,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertFalse(queue.activeApplications.contains(app_1));
queue.finishApplicationAttempt(app_3,A);
assertEquals(0,queue.getNumActiveApplications());
assertEquals(0,queue.getNumPendingApplications());
assertEquals(0,queue.getNumActiveApplications(user_0));
assertEquals(0,queue.getNumPendingApplications(user_0));
assertFalse(queue.activeApplications.contains(app_3));
}
InternalCallVerifier EqualityVerifier
/**
 * With no resources in use, a queue's absolute max-available capacity is
 * simply its configured absolute maximum: 1.0 for root and 0.9 for the
 * single child capped at 90%.
 */
@Test public void testAbsoluteMaxAvailCapacityNoUse() throws Exception {
  ResourceCalculator calculator=new DefaultResourceCalculator();
  Resource cluster=Resources.createResource(100 * 16 * GB,100 * 32);
  YarnConfiguration yarnConf=new YarnConfiguration();
  CapacitySchedulerConfiguration schedulerConf=new CapacitySchedulerConfiguration();
  CapacitySchedulerContext context=mock(CapacitySchedulerContext.class);
  when(context.getConf()).thenReturn(yarnConf);
  when(context.getConfiguration()).thenReturn(schedulerConf);
  when(context.getClusterResource()).thenReturn(cluster);
  when(context.getResourceCalculator()).thenReturn(calculator);
  when(context.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1));
  when(context.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,32));
  // One child queue under root, capacity and maximum capacity both at 90%.
  final String L1Q1="L1Q1";
  schedulerConf.setQueues(CapacitySchedulerConfiguration.ROOT,new String[]{L1Q1});
  final String childPath=CapacitySchedulerConfiguration.ROOT + "." + L1Q1;
  schedulerConf.setCapacity(childPath,90);
  schedulerConf.setMaximumCapacity(childPath,90);
  ParentQueue rootQueue=new ParentQueue(context,CapacitySchedulerConfiguration.ROOT,null,null);
  LeafQueue childQueue=new LeafQueue(context,L1Q1,rootQueue,null);
  LOG.info("t1 root " + CSQueueUtils.getAbsoluteMaxAvailCapacity(calculator,cluster,rootQueue));
  LOG.info("t1 l1q1 " + CSQueueUtils.getAbsoluteMaxAvailCapacity(calculator,cluster,childQueue));
  assertEquals(1.0f,CSQueueUtils.getAbsoluteMaxAvailCapacity(calculator,cluster,rootQueue),0.000001f);
  assertEquals(0.9f,CSQueueUtils.getAbsoluteMaxAvailCapacity(calculator,cluster,childQueue),0.000001f);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Checks CSQueueUtils.getAbsoluteMaxAvailCapacity for a nested queue
 * (root -> L1Q1 -> L2Q2) as usage accumulates in sibling and ancestor
 * queues: L2Q2's available headroom shrinks from its configured 40%
 * (50% of L1Q1's 80%) down to 10%.
 */
@Test public void testAbsoluteMaxAvailCapacityWithUse() throws Exception {
ResourceCalculator resourceCalculator=new DefaultResourceCalculator();
Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 32);
YarnConfiguration conf=new YarnConfiguration();
CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration();
CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class);
when(csContext.getConf()).thenReturn(conf);
when(csContext.getConfiguration()).thenReturn(csConf);
when(csContext.getClusterResource()).thenReturn(clusterResource);
when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1));
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,32));
// Configured hierarchy: L1Q1 80/80, L1Q2 20/100, L2Q1 50/50 and L2Q2 50/50 under L1Q1.
final String L1Q1="L1Q1";
final String L1Q2="L1Q2";
final String L2Q1="L2Q1";
final String L2Q2="L2Q2";
csConf.setQueues(CapacitySchedulerConfiguration.ROOT,new String[]{L1Q1,L1Q2,L2Q1,L2Q2});
final String L1Q1P=CapacitySchedulerConfiguration.ROOT + "." + L1Q1;
csConf.setCapacity(L1Q1P,80);
csConf.setMaximumCapacity(L1Q1P,80);
final String L1Q2P=CapacitySchedulerConfiguration.ROOT + "." + L1Q2;
csConf.setCapacity(L1Q2P,20);
csConf.setMaximumCapacity(L1Q2P,100);
final String L2Q1P=L1Q1P + "." + L2Q1;
csConf.setCapacity(L2Q1P,50);
csConf.setMaximumCapacity(L2Q1P,50);
final String L2Q2P=L1Q1P + "." + L2Q2;
csConf.setCapacity(L2Q2P,50);
csConf.setMaximumCapacity(L2Q2P,50);
float result;
ParentQueue root=new ParentQueue(csContext,CapacitySchedulerConfiguration.ROOT,null,null);
LeafQueue l1q1=new LeafQueue(csContext,L1Q1,root,null);
LeafQueue l1q2=new LeafQueue(csContext,L1Q2,root,null);
LeafQueue l2q2=new LeafQueue(csContext,L2Q2,l1q1,null);
LeafQueue l2q1=new LeafQueue(csContext,L2Q1,l1q1,null);
// Nothing in use: L2Q2 sees its full configured 40% (0.8 * 0.5).
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.4f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// Sibling subtree L1Q2 uses 10% of the cluster (root total 10%) — no effect yet.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.1f));
Resources.addTo(l1q2.getUsedResources(),Resources.multiply(clusterResource,0.1f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.4f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// L1Q2 grows to 40% (root total 40%): L2Q2's headroom is squeezed to 30%.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.3f));
Resources.addTo(l1q2.getUsedResources(),Resources.multiply(clusterResource,0.3f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.3f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// L1Q1 itself uses 10% (root total 50%): headroom unchanged at 30%.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.1f));
Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.1f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.3f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// Sibling L2Q1 uses 20% (root 70%, L1Q1 30%): still 30%.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l2q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.3f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
// L2Q1 grows another 20% (root 90%, L1Q1 50%): only 10% is left for L2Q2.
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
Resources.addTo(l2q1.getUsedResources(),Resources.multiply(clusterResource,0.2f));
result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2);
assertEquals(0.1f,result,0.000001f);
LOG.info("t2 l2q2 " + result);
}
InternalCallVerifier EqualityVerifier
@Test(timeout=30000) public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
final YarnConfiguration conf=new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager);
rm.start();
MockNM nm1=rm.registerNode("localhost:1234",5120);
Map acls=new HashMap(2);
acls.put(ApplicationAccessType.VIEW_APP,"*");
RMApp app=rm.submitApp(1024,"appname","appuser",acls);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
int msecToWait=10000;
int msecToSleep=100;
while (attempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED && msecToWait > 0) {
LOG.info("Waiting for AppAttempt to reach LAUNCHED state. " + "Current state is " + attempt.getAppAttemptState());
Thread.sleep(msecToSleep);
msecToWait-=msecToSleep;
}
Assert.assertEquals(attempt.getAppAttemptState(),RMAppAttemptState.LAUNCHED);
final YarnRPC rpc=YarnRPC.create(conf);
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
ApplicationMasterProtocol client=currentUser.doAs(new PrivilegedAction(){
@Override public ApplicationMasterProtocol run(){
return (ApplicationMasterProtocol)rpc.getProxy(ApplicationMasterProtocol.class,rmBindAddress,conf);
}
}
);
RegisterApplicationMasterRequest request=RegisterApplicationMasterRequest.newInstance("localhost",12345,"");
client.registerApplicationMaster(request);
final CapacityScheduler cs=(CapacityScheduler)rm.getResourceScheduler();
final CyclicBarrier barrier=new CyclicBarrier(2);
Thread otherThread=new Thread(new Runnable(){
@Override public void run(){
synchronized (cs) {
try {
barrier.await();
barrier.await();
}
catch ( InterruptedException e) {
e.printStackTrace();
}
catch ( BrokenBarrierException e) {
e.printStackTrace();
}
}
}
}
);
otherThread.start();
barrier.await();
AllocateRequest allocateRequest=AllocateRequest.newInstance(0,0.0f,null,null,null);
client.allocate(allocateRequest);
barrier.await();
otherThread.join();
rm.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * A node that reconnects advertising a smaller resource must replace its
 * previous registration: the cluster total reflects the remove + re-add.
 */
@Test public void testReconnectedNode() throws Exception {
  CapacitySchedulerConfiguration schedulerConf=new CapacitySchedulerConfiguration();
  setupQueueConfiguration(schedulerConf);
  CapacityScheduler scheduler=new CapacityScheduler();
  scheduler.setConf(new YarnConfiguration());
  scheduler.setRMContext(resourceManager.getRMContext());
  scheduler.init(schedulerConf);
  scheduler.start();
  scheduler.reinitialize(schedulerConf,new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(schedulerConf),new NMTokenSecretManagerInRM(schedulerConf),new ClientToAMTokenSecretManagerInRM(),null));
  // A 4GB node and a 2GB node join the cluster.
  RMNode firstNode=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1);
  RMNode secondNode=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),2);
  scheduler.handle(new NodeAddedSchedulerEvent(firstNode));
  scheduler.handle(new NodeAddedSchedulerEvent(secondNode));
  Assert.assertEquals(6 * GB,scheduler.getClusterResource().getMemory());
  // The first node reconnects with only 2GB; its old 4GB entry must be replaced.
  firstNode=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),1);
  scheduler.handle(new NodeRemovedSchedulerEvent(firstNode));
  scheduler.handle(new NodeAddedSchedulerEvent(firstNode));
  Assert.assertEquals(4 * GB,scheduler.getClusterResource().getMemory());
  scheduler.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Reinitializing the CapacityScheduler with a configuration that adds a
 * new queue (b4 under b) must create that queue and wire it to its parent,
 * while honoring the adjusted capacities (a=80, b=20).
 */
@Test public void testRefreshQueuesWithNewQueue() throws Exception {
CapacityScheduler cs=new CapacityScheduler();
CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration();
setupQueueConfiguration(conf);
cs.setConf(new YarnConfiguration());
cs.setRMContext(resourceManager.getRMContext());
cs.init(conf);
cs.start();
cs.reinitialize(conf,new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(conf),new NMTokenSecretManagerInRM(conf),new ClientToAMTokenSecretManagerInRM(),null));
checkQueueCapacities(cs,A_CAPACITY,B_CAPACITY);
String B4=B + ".b4";
float B4_CAPACITY=10;
// Carve b4's 10% out of b3 so b's children still sum to 100.
// NOTE: B3_CAPACITY is shared test state; the finally block below restores it.
B3_CAPACITY-=B4_CAPACITY;
try {
conf.setCapacity(A,80f);
conf.setCapacity(B,20f);
conf.setQueues(B,new String[]{"b1","b2","b3","b4"});
conf.setCapacity(B1,B1_CAPACITY);
conf.setCapacity(B2,B2_CAPACITY);
conf.setCapacity(B3,B3_CAPACITY);
conf.setCapacity(B4,B4_CAPACITY);
// Refresh with the new layout and verify the new queue is attached under b.
cs.reinitialize(conf,mockContext);
checkQueueCapacities(cs,80f,20f);
CSQueue rootQueue=cs.getRootQueue();
CSQueue queueB=findQueue(rootQueue,B);
CSQueue queueB4=findQueue(queueB,B4);
assertEquals(queueB,queueB4.getParent());
}
finally {
// Undo the shared-state tweak for other tests.
B3_CAPACITY+=B4_CAPACITY;
cs.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moves an application between two leaf queues (a1 -> a2) under the same
 * parent and checks queue membership at every level: the app leaves a1,
 * shows up in a2, and remains visible in the common ancestors "a" and
 * "root" throughout.
 *
 * Review fixes: Assert.assertTrue(queue.equals(...)) replaced with
 * Assert.assertEquals for meaningful failure messages; restored the
 * List<ApplicationAttemptId> generics lost from this source.
 */
@Test public void testMoveAppSameParent() throws Exception {
  MockRM rm=setUpMove();
  AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
  RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
  ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
  // Before the move: the app lives in a1 and is counted in a and root; a2 is empty.
  List<ApplicationAttemptId> appsInA1=scheduler.getAppsInQueue("a1");
  assertEquals(1,appsInA1.size());
  String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
  Assert.assertEquals("a1",queue);
  List<ApplicationAttemptId> appsInA=scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1,appsInA.size());
  List<ApplicationAttemptId> appsInRoot=scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1,appsInRoot.size());
  List<ApplicationAttemptId> appsInA2=scheduler.getAppsInQueue("a2");
  assertTrue(appsInA2.isEmpty());
  scheduler.moveApplication(app.getApplicationId(),"a2");
  // After the move: a2 owns the app, a1 is empty, the ancestors are unchanged.
  appsInA2=scheduler.getAppsInQueue("a2");
  assertEquals(1,appsInA2.size());
  queue=scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue().getQueueName();
  Assert.assertEquals("a2",queue);
  appsInA1=scheduler.getAppsInQueue("a1");
  assertTrue(appsInA1.isEmpty());
  appsInA=scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appAttemptId));
  assertEquals(1,appsInA.size());
  appsInRoot=scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appAttemptId));
  assertEquals(1,appsInRoot.size());
  rm.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that moving an application between sibling leaf queues (a1 -> a2)
// transfers the application count, resource usage, container count and
// per-user metrics from the source queue to the target queue, while the
// parent queue ("a") and root totals remain unchanged.
@Test public void testMoveAppQueueMetricsCheck() throws Exception {
ResourceScheduler scheduler=resourceManager.getResourceScheduler();
// Register two 5GB/1-vcore node managers.
String host_0="host_0";
NodeManager nm_0=registerNode(host_0,1234,2345,NetworkTopology.DEFAULT_RACK,Resources.createResource(5 * GB,1));
String host_1="host_1";
NodeManager nm_1=registerNode(host_1,1234,2345,NetworkTopology.DEFAULT_RACK,Resources.createResource(5 * GB,1));
Priority priority_0=org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0);
Priority priority_1=org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1);
// application_0 (queue a1): one 3GB task at priority 1.
Application application_0=new Application("user_0","a1",resourceManager);
application_0.submit();
application_0.addNodeManager(host_0,1234,nm_0);
application_0.addNodeManager(host_1,1234,nm_1);
Resource capability_0_0=Resources.createResource(3 * GB,1);
application_0.addResourceRequestSpec(priority_1,capability_0_0);
Resource capability_0_1=Resources.createResource(2 * GB,1);
application_0.addResourceRequestSpec(priority_0,capability_0_1);
Task task_0_0=new Task(application_0,priority_1,new String[]{host_0,host_1});
application_0.addTask(task_0_0);
// application_1 (queue b2): one 1GB task at priority 1.
Application application_1=new Application("user_1","b2",resourceManager);
application_1.submit();
application_1.addNodeManager(host_0,1234,nm_0);
application_1.addNodeManager(host_1,1234,nm_1);
Resource capability_1_0=Resources.createResource(1 * GB,1);
application_1.addResourceRequestSpec(priority_1,capability_1_0);
Resource capability_1_1=Resources.createResource(2 * GB,1);
application_1.addResourceRequestSpec(priority_0,capability_1_1);
Task task_1_0=new Task(application_1,priority_1,new String[]{host_0,host_1});
application_1.addTask(task_1_0);
// Kick scheduling so containers get assigned before the move.
application_0.schedule();
application_1.schedule();
nodeUpdate(nm_0);
nodeUpdate(nm_1);
// Snapshot queue state before the move.
CapacityScheduler cs=(CapacityScheduler)resourceManager.getResourceScheduler();
CSQueue origRootQ=cs.getRootQueue();
CapacitySchedulerInfo oldInfo=new CapacitySchedulerInfo(origRootQ);
int origNumAppsA=getNumAppsInQueue("a",origRootQ.getChildQueues());
int origNumAppsRoot=origRootQ.getNumApplications();
// Move application_0 from a1 to the sibling leaf a2, then snapshot again.
scheduler.moveApplication(application_0.getApplicationId(),"a2");
CSQueue newRootQ=cs.getRootQueue();
int newNumAppsA=getNumAppsInQueue("a",newRootQ.getChildQueues());
int newNumAppsRoot=newRootQ.getNumApplications();
CapacitySchedulerInfo newInfo=new CapacitySchedulerInfo(newRootQ);
CapacitySchedulerLeafQueueInfo origOldA1=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a1",oldInfo.getQueues());
CapacitySchedulerLeafQueueInfo origNewA1=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a1",newInfo.getQueues());
CapacitySchedulerLeafQueueInfo targetOldA2=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a2",oldInfo.getQueues());
CapacitySchedulerLeafQueueInfo targetNewA2=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a2",newInfo.getQueues());
// App counts: a1 drops from 1 to 0; parent "a" and root stay constant.
assertEquals(1,origOldA1.getNumApplications());
assertEquals(1,origNumAppsA);
assertEquals(2,origNumAppsRoot);
assertEquals(0,origNewA1.getNumApplications());
assertEquals(1,newNumAppsA);
assertEquals(2,newNumAppsRoot);
// Resource usage (3GB / 1 vcore) moves from a1 to a2.
assertEquals(3 * GB,origOldA1.getResourcesUsed().getMemory());
assertEquals(1,origOldA1.getResourcesUsed().getvCores());
assertEquals(0,origNewA1.getResourcesUsed().getMemory());
assertEquals(0,origNewA1.getResourcesUsed().getvCores());
assertEquals(3 * GB,targetNewA2.getResourcesUsed().getMemory());
assertEquals(1,targetNewA2.getResourcesUsed().getvCores());
assertEquals(0,targetOldA2.getNumApplications());
assertEquals(0,targetOldA2.getResourcesUsed().getMemory());
assertEquals(0,targetOldA2.getResourcesUsed().getvCores());
assertEquals(1,targetNewA2.getNumApplications());
// Container counts follow the application.
assertEquals(1,origOldA1.getNumContainers());
assertEquals(0,origNewA1.getNumContainers());
assertEquals(1,targetNewA2.getNumContainers());
assertEquals(0,targetOldA2.getNumContainers());
// Per-user metrics move too: user list in a1 is emptied, a2 gains user_0's usage.
assertEquals(3 * GB,origOldA1.getUsers().getUsersList().get(0).getResourcesUsed().getMemory());
assertEquals(1,origOldA1.getUsers().getUsersList().get(0).getResourcesUsed().getvCores());
assertEquals(0,origNewA1.getUsers().getUsersList().size());
assertEquals(3 * GB,targetNewA2.getUsers().getUsersList().get(0).getResourcesUsed().getMemory());
assertEquals(1,targetNewA2.getUsers().getUsersList().get(0).getResourcesUsed().getvCores());
// Applications keep their resources across the move.
application_0.schedule();
checkApplicationResourceUsage(3 * GB,application_0);
application_1.schedule();
checkApplicationResourceUsage(1 * GB,application_1);
checkNodeResourceUsage(4 * GB,nm_0);
checkNodeResourceUsage(0 * GB,nm_1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that moveAllApps("a1","b1") relocates every application from a1
// to b1, updating leaf, parent and root queue membership accordingly.
@Test public void testMoveAllApps() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Precondition: the app is in a1, visible from parent "a" and root, absent from b/b1.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
scheduler.moveAllApps("a1","b1");
// Give the asynchronous move event time to be processed.
// NOTE(review): a fixed sleep is potentially flaky; a wait-for-condition
// helper would be more robust — confirm whether one is available here.
Thread.sleep(1000);
// Postcondition: the app now lives in b1 (and b), and a1/a are empty.
appsInB1=scheduler.getAppsInQueue("b1");
assertEquals(1,appsInB1.size());
queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("b1"));
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.contains(appAttemptId));
assertEquals(1,appsInB.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that killAllAppsInQueue("a1") kills the submitted application and
// removes it from the leaf queue, its parent and root.
@Test public void testKillAllAppsInQueue() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Precondition: app registered under a1, parent "a" and root.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
scheduler.killAllAppsInQueue("a1");
// Wait until the kill has fully propagated through the RMApp state machine.
rm.waitForState(app.getApplicationId(),RMAppState.KILLED);
// Postcondition: every queue view is empty.
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.isEmpty());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
InternalCallVerifier EqualityVerifier
// Maximum capacity must default when unset and revert to the default when
// explicitly set to the "undefined" sentinel (-1).
@Test public void testMaximumCapacitySetup(){
  final float epsilon=0.0000001f;
  CapacitySchedulerConfiguration schedulerConf=new CapacitySchedulerConfiguration();
  // Unset -> default maximum capacity.
  assertEquals(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE,schedulerConf.getMaximumCapacity(A),epsilon);
  // An explicit value is honoured.
  schedulerConf.setMaximumCapacity(A,50.0f);
  assertEquals(50.0f,schedulerConf.getMaximumCapacity(A),epsilon);
  // -1 marks the value as undefined and restores the default.
  schedulerConf.setMaximumCapacity(A,-1);
  assertEquals(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE,schedulerConf.getMaximumCapacity(A),epsilon);
}
InternalCallVerifier EqualityVerifier
// getNumClusterNodes() must exactly track node-added / node-removed events.
@Test public void testNumClusterNodes() throws Exception {
  YarnConfiguration yarnConf=new YarnConfiguration();
  CapacityScheduler scheduler=new CapacityScheduler();
  scheduler.setConf(yarnConf);
  RMContextImpl rmContext=new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(yarnConf),new NMTokenSecretManagerInRM(yarnConf),new ClientToAMTokenSecretManagerInRM(),null);
  scheduler.setRMContext(rmContext);
  CapacitySchedulerConfiguration schedulerConf=new CapacitySchedulerConfiguration();
  setupQueueConfiguration(schedulerConf);
  scheduler.init(schedulerConf);
  scheduler.start();
  // A freshly started scheduler knows no nodes.
  assertEquals(0,scheduler.getNumClusterNodes());
  RMNode nodeA=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1);
  RMNode nodeB=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),2);
  // Add both nodes, then exercise remove / re-add / drain transitions.
  scheduler.handle(new NodeAddedSchedulerEvent(nodeA));
  scheduler.handle(new NodeAddedSchedulerEvent(nodeB));
  assertEquals(2,scheduler.getNumClusterNodes());
  scheduler.handle(new NodeRemovedSchedulerEvent(nodeA));
  assertEquals(1,scheduler.getNumClusterNodes());
  scheduler.handle(new NodeAddedSchedulerEvent(nodeA));
  assertEquals(2,scheduler.getNumClusterNodes());
  scheduler.handle(new NodeRemovedSchedulerEvent(nodeB));
  scheduler.handle(new NodeRemovedSchedulerEvent(nodeA));
  assertEquals(0,scheduler.getNumClusterNodes());
  scheduler.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that moveApplication() relocates a single application from a1 to
// b1, updating leaf, parent and root queue membership accordingly.
@Test public void testMoveAppBasic() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Precondition: the app is in a1 (and visible from "a" and root); b/b1 are empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("a1"));
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
scheduler.moveApplication(app.getApplicationId(),"b1");
// Postcondition: the app now lives in b1/b, and a1/a are empty.
appsInB1=scheduler.getAppsInQueue("b1");
assertEquals(1,appsInB1.size());
queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName();
Assert.assertTrue(queue.equals("b1"));
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.contains(appAttemptId));
assertEquals(1,appsInB.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInA1=scheduler.getAppsInQueue("a1");
assertTrue(appsInA1.isEmpty());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.isEmpty());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that an application added through the CapacityScheduler lands in
// queue "a1" and can be removed again (the add/remove cycle itself is checked
// inside TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler).
@Test public void testAddAndRemoveAppFromCapacityScheduler() throws Exception {
  CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration();
  setupQueueConfiguration(conf);
  conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
  MockRM rm=new MockRM(conf);
  try {
    @SuppressWarnings("unchecked") AbstractYarnScheduler cs=(AbstractYarnScheduler)rm.getResourceScheduler();
    SchedulerApplication app=TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler(cs.getSchedulerApplications(),cs,"a1");
    Assert.assertEquals("a1",app.getQueue().getQueueName());
  } finally {
    // Fix: the original never stopped the MockRM, leaking its resources into
    // subsequent tests; every sibling test in this file calls rm.stop().
    rm.stop();
  }
}
InternalCallVerifier BooleanVerifier
// The scheduler's application comparator must order attempts by ApplicationId:
// lower cluster timestamp first, then lower sequence number.
@Test(timeout=5000) public void testApplicationComparator(){
  CapacityScheduler scheduler=new CapacityScheduler();
  Comparator comparator=scheduler.getApplicationComparator();
  // Three ids in strictly increasing order: (1,1) < (1,2) < (2,1).
  ApplicationId earliestId=ApplicationId.newInstance(1,1);
  ApplicationId middleId=ApplicationId.newInstance(1,2);
  ApplicationId latestId=ApplicationId.newInstance(2,1);
  FiCaSchedulerApp earliestApp=Mockito.mock(FiCaSchedulerApp.class);
  when(earliestApp.getApplicationId()).thenReturn(earliestId);
  FiCaSchedulerApp middleApp=Mockito.mock(FiCaSchedulerApp.class);
  when(middleApp.getApplicationId()).thenReturn(middleId);
  FiCaSchedulerApp latestApp=Mockito.mock(FiCaSchedulerApp.class);
  when(latestApp.getApplicationId()).thenReturn(latestId);
  // Every adjacent and transitive pair compares strictly less-than.
  assertTrue(comparator.compare(earliestApp,middleApp) < 0);
  assertTrue(comparator.compare(earliestApp,latestApp) < 0);
  assertTrue(comparator.compare(middleApp,latestApp) < 0);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that when the scheduler kills (preempts) a container, the
// ResourceRequests that produced it are restored to the application, so an
// equivalent container can be allocated again afterwards.
@Test(timeout=30000) public void testRecoverRequestAfterPreemption() throws Exception {
  Configuration conf=new Configuration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
  MockRM rm1=new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1=rm1.registerNode("127.0.0.1:1234",8000);
    RMApp app1=rm1.submitApp(1024);
    MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
    CapacityScheduler cs=(CapacityScheduler)rm1.getResourceScheduler();
    // Ask for one node-local 1GB container and wait for its allocation.
    am1.allocate("127.0.0.1",1024,1,new ArrayList());
    ContainerId containerId1=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
    rm1.waitForState(nm1,containerId1,RMContainerState.ALLOCATED);
    RMContainer rmContainer=cs.getRMContainer(containerId1);
    List requests=rmContainer.getResourceRequests();
    FiCaSchedulerApp app=cs.getApplicationAttempt(am1.getApplicationAttemptId());
    FiCaSchedulerNode node=cs.getNode(rmContainer.getAllocatedNode());
    // Once allocated, the node-local request must have been consumed (null);
    // rack and ANY requests are skipped here.
    for ( ResourceRequest request : requests) {
      if (request.getResourceName().equals(node.getRackName()) || request.getResourceName().equals(ResourceRequest.ANY)) {
        continue;
      }
      Assert.assertNull(app.getResourceRequest(request.getPriority(),request.getResourceName()));
    }
    // Preempt the container: all three request levels (node/rack/ANY) must be
    // recovered with one outstanding container each.
    cs.killContainer(rmContainer);
    Assert.assertEquals(3,requests.size());
    for ( ResourceRequest request : requests) {
      Assert.assertEquals(1,app.getResourceRequest(request.getPriority(),request.getResourceName()).getNumContainers());
    }
    // A replacement container is allocated from the recovered requests.
    ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
    rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED);
    List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
    Assert.assertTrue(containers.size() == 1);
  } finally {
    // Fix: the original never stopped rm1, leaking RM threads/ports into
    // later tests; siblings in this file consistently call stop().
    rm1.stop();
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that moveAllApps() to a nonexistent destination queue throws a
// YarnException and leaves all queue memberships untouched.
@Test public void testMoveAllAppsInvalidDestination() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Precondition: the app is registered in a1, "a" and root; b/b1 are empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
try {
scheduler.moveAllApps("a1","DOES_NOT_EXIST");
Assert.fail();
}
catch ( YarnException e) {
// Expected: destination queue does not exist.
}
// Postcondition: nothing moved — the state matches the precondition exactly.
appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
CapacityScheduler cs=(CapacityScheduler)rm.getResourceScheduler();
String host="127.0.0.1";
RMNode node=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,host);
cs.handle(new NodeAddedSchedulerEvent(node));
ApplicationId appId=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
SchedulerEvent addAppEvent=new AppAddedSchedulerEvent(appId,"default","user");
cs.handle(addAppEvent);
SchedulerEvent addAttemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
cs.handle(addAttemptEvent);
cs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
Assert.assertTrue(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
cs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
Assert.assertFalse(cs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
rm.stop();
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Sanity-checks the queue info reported for queue "a" (name and child count)
// and verifies every queue appears exactly once in the user ACL listing.
@Test public void testCapacitySchedulerInfo() throws Exception {
  QueueInfo queueInfo=resourceManager.getResourceScheduler().getQueueInfo("a",true,true);
  // Fix: JUnit's assertEquals signature is (expected, actual); the original
  // passed them swapped, which yields misleading failure messages.
  Assert.assertEquals("a",queueInfo.getQueueName());
  Assert.assertEquals(2,queueInfo.getChildQueues().size());
  List userACLInfo=resourceManager.getResourceScheduler().getQueueUserAclInfo();
  Assert.assertNotNull(userACLInfo);
  for ( QueueUserACLInfo queueUserACLInfo : userACLInfo) {
    // Each queue name must occur exactly once in the ACL list.
    Assert.assertEquals(1,getQueueCount(userACLInfo,queueUserACLInfo.getQueueName()));
  }
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that moveAllApps() from a nonexistent source queue throws a
// YarnException and leaves all queue memberships untouched.
@Test public void testMoveAllAppsInvalidSource() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Precondition: the app is registered in a1, "a" and root; b/b1 are empty.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
List appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
List appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
try {
scheduler.moveAllApps("DOES_NOT_EXIST","b1");
Assert.fail();
}
catch ( YarnException e) {
// Expected: source queue does not exist.
}
// Postcondition: nothing moved — the state matches the precondition exactly.
appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
appsInB1=scheduler.getAppsInQueue("b1");
assertTrue(appsInB1.isEmpty());
appsInB=scheduler.getAppsInQueue("b");
assertTrue(appsInB.isEmpty());
rm.stop();
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// getAppsInQueue() must report apps per leaf queue, aggregate them at parent
// queues and root, and return null for an unknown queue name.
@Test public void testGetAppsInQueue() throws Exception {
  // One app each in leaf queues a1, a2 and b2.
  Application appInA1=new Application("user_0","a1",resourceManager);
  appInA1.submit();
  Application appInA2=new Application("user_0","a2",resourceManager);
  appInA2.submit();
  Application appInB2=new Application("user_0","b2",resourceManager);
  appInB2.submit();
  ResourceScheduler scheduler=resourceManager.getResourceScheduler();
  // Leaf a1 contains exactly its own app.
  List appsInA1=scheduler.getAppsInQueue("a1");
  assertEquals(1,appsInA1.size());
  // Parent "a" aggregates a1 and a2.
  List appsInA=scheduler.getAppsInQueue("a");
  assertTrue(appsInA.contains(appInA1.getApplicationAttemptId()));
  assertTrue(appsInA.contains(appInA2.getApplicationAttemptId()));
  assertEquals(2,appsInA.size());
  // Root sees all three submitted applications.
  List appsInRoot=scheduler.getAppsInQueue("root");
  assertTrue(appsInRoot.contains(appInA1.getApplicationAttemptId()));
  assertTrue(appsInRoot.contains(appInA2.getApplicationAttemptId()));
  assertTrue(appsInRoot.contains(appInB2.getApplicationAttemptId()));
  assertEquals(3,appsInRoot.size());
  // Unknown queue -> null, not an empty list.
  Assert.assertNull(scheduler.getAppsInQueue("nonexistentqueue"));
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that killAllAppsInQueue() on a nonexistent queue throws a
// YarnException and does not affect any running application.
@Test public void testKillAllAppsInvalidSource() throws Exception {
MockRM rm=setUpMove();
AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
// Precondition: the app is registered in a1, "a" and root.
List appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
List appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
List appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
try {
scheduler.killAllAppsInQueue("DOES_NOT_EXIST");
Assert.fail();
}
catch ( YarnException e) {
// Expected: queue does not exist.
}
// Postcondition: nothing was killed — membership is unchanged.
appsInA1=scheduler.getAppsInQueue("a1");
assertEquals(1,appsInA1.size());
appsInA=scheduler.getAppsInQueue("a");
assertTrue(appsInA.contains(appAttemptId));
assertEquals(1,appsInA.size());
appsInRoot=scheduler.getAppsInQueue("root");
assertTrue(appsInRoot.contains(appAttemptId));
assertEquals(1,appsInRoot.size());
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that a normal (non-AM) container is not handed to the AM while the
// token service is forced to use IPs against an unresolvable host, and that
// the allocation is delivered once hostname resolution is restored.
@Test public void testNormalContainerAllocationWhenDNSUnavailable() throws Exception {
  MockRM rm1=new MockRM(conf);
  rm1.start();
  try {
    MockNM nm1=rm1.registerNode("unknownhost:1234",8000);
    RMApp app1=rm1.submitApp(200);
    MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
    am1.allocate("127.0.0.1",1024,1,new ArrayList());
    ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
    rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED);
    // With IP-based token service and an unresolvable NM host, the pull
    // returns no containers.
    SecurityUtilTestHelper.setTokenServiceUseIp(true);
    List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
    Assert.assertEquals(0,containers.size());
    // Restoring hostname-based token service releases the allocation.
    SecurityUtilTestHelper.setTokenServiceUseIp(false);
    containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
    Assert.assertEquals(1,containers.size());
  } finally {
    // Fix: reset the JVM-global token-service flag even when an assertion
    // fails mid-test (otherwise later tests inherit useIp=true), and stop
    // the RM, which the original leaked.
    SecurityUtilTestHelper.setTokenServiceUseIp(false);
    rm1.stop();
  }
}
IterativeVerifier InternalCallVerifier EqualityVerifier
// Verifies that the AM container is NOT allocated while DNS is unavailable
// (token service forced to use IPs against an unresolvable host): the attempt
// must stay SCHEDULED, and proceed to ALLOCATED once resolution is restored.
@Test(timeout=20000) public void testAMContainerAllocationWhenDNSUnavailable() throws Exception {
  MockRM rm1=new MockRM(conf){
    @Override protected RMSecretManagerService createRMSecretManagerService(){
      return new TestRMSecretManagerService(conf,rmContext);
    }
  };
  rm1.start();
  try {
    MockNM nm1=rm1.registerNode("unknownhost:1234",8000);
    SecurityUtilTestHelper.setTokenServiceUseIp(true);
    RMApp app1=rm1.submitApp(200);
    RMAppAttempt attempt=app1.getCurrentAppAttempt();
    nm1.nodeHeartbeat(true);
    // Fix: the original looped on `numRetries <= 5` without ever updating
    // numRetries inside the method, so the loop bound never advanced. Use a
    // local, explicitly incremented retry counter (5 x 1s heartbeats fits
    // well inside the 20s test timeout).
    int retries=0;
    while (retries++ < 5) {
      nm1.nodeHeartbeat(true);
      Thread.sleep(1000);
      // While DNS is unavailable the attempt must remain SCHEDULED.
      Assert.assertEquals(RMAppAttemptState.SCHEDULED,attempt.getAppAttemptState());
      System.out.println("Waiting for am container to be allocated.");
    }
    // Restore hostname resolution; the AM container can now be allocated.
    SecurityUtilTestHelper.setTokenServiceUseIp(false);
    rm1.waitForState(attempt.getAppAttemptId(),RMAppAttemptState.ALLOCATED);
    MockRM.launchAndRegisterAM(app1,rm1,nm1);
  } finally {
    // Reset the JVM-global token-service flag even on failure, and stop the
    // RM, which the original leaked.
    SecurityUtilTestHelper.setTokenServiceUseIp(false);
    rm1.stop();
  }
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Verifies that a container token is NOT attached at allocation time but is
// generated lazily when the AM pulls the container via allocate().
@Test public void testContainerTokenGeneratedOnPullRequest() throws Exception {
MockRM rm1=new MockRM(conf);
rm1.start();
MockNM nm1=rm1.registerNode("127.0.0.1:1234",8000);
RMApp app1=rm1.submitApp(200);
MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
am1.allocate("127.0.0.1",1024,1,new ArrayList());
ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED);
// Immediately after allocation, no token has been generated yet.
RMContainer container=rm1.getResourceScheduler().getRMContainer(containerId2);
Assert.assertEquals(containerId2,container.getContainerId());
Assert.assertNull(container.getContainer().getContainerToken());
// Pulling the container through allocate() attaches the token.
List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
Assert.assertEquals(containerId2,containers.get(0).getId());
Assert.assertNotNull(containers.get(0).getContainerToken());
rm1.stop();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies that a 3GB request is never satisfied by the 2GB node (nm1) even
// when reserved there, but is eventually allocated on the 3GB node (nm2).
@Test(timeout=3000000) public void testExcessReservationThanNodeManagerCapacity() throws Exception {
MockRM rm=new MockRM(conf);
rm.start();
// nm1 has 2GB total (too small for the 3GB request); nm2 has 3GB.
MockNM nm1=rm.registerNode("127.0.0.1:1234",2 * GB,4);
MockNM nm2=rm.registerNode("127.0.0.1:2234",3 * GB,4);
nm1.nodeHeartbeat(true);
nm2.nodeHeartbeat(true);
// Wait (bounded) for both node managers to register with the RM context.
int waitCount=20;
int size=rm.getRMContext().getRMNodes().size();
while ((size=rm.getRMContext().getRMNodes().size()) != 2 && waitCount-- > 0) {
LOG.info("Waiting for node managers to register : " + size);
Thread.sleep(100);
}
Assert.assertEquals(2,rm.getRMContext().getRMNodes().size());
RMApp app1=rm.submitApp(128);
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1=app1.getCurrentAppAttempt();
MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
LOG.info("sending container requests ");
// One 3GB request anywhere in the cluster.
am1.addRequests(new String[]{"*"},3 * GB,1,1);
AllocateResponse alloc1Response=am1.schedule();
nm1.nodeHeartbeat(true);
int waitCounter=20;
LOG.info("heartbeating nm1");
// Heartbeat only nm1: it cannot fit 3GB, so no container may appear.
while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(500);
alloc1Response=am1.schedule();
}
LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 0);
LOG.info("heartbeating nm2");
waitCounter=20;
// Heartbeat nm2 (3GB): the request must now be satisfied there.
nm2.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(500);
alloc1Response=am1.schedule();
}
LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 1);
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that after one 1GB assignment, the queue's available-MB metric
// equals its capacity share of the node minus the allocated 1GB.
@Test public void testSingleQueueOneUserMetrics() throws Exception {
// NOTE(review): the variable is named "a" but the queue is looked up with
// key B — presumably intentional reuse of a different leaf queue for this
// metrics check; confirm against the enclosing class's queue setup.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(B));
final String user_0="user_0";
// Two application attempts from the same user in the same queue.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_0);
// Single 8GB node; cluster resource mirrors that one node.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
final int numNodes=1;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// app_0 asks for 3 x 1GB anywhere; one assignment round runs.
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory)));
a.assignContainers(clusterResource,node_0);
// Available = (node memory * queue capacity fraction) - 1GB just allocated.
assertEquals((int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1 * GB),a.getMetrics().getAvailableMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// End-to-end check of a single leaf queue with one user: containers are
// assigned up to the user limit, the user-limit factor unlocks further
// allocation, max-capacity caps it, and completed containers release
// resources back until the queue is empty again.
@Test public void testSingleQueueWithOneUser() throws Exception {
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
// Two application attempts from the same user in the same queue.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_0);
// Single 8GB node; cluster resource mirrors that one node.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
final int numNodes=1;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// app_0 wants 3 x 1GB, app_1 wants 2 x 1GB, all at the same priority.
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
// Round 1: first 1GB container goes to app_0.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
assertEquals(0 * GB,a.getMetrics().getAvailableMB());
// Round 2: app_0 gets a second container.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Round 3: the user limit is reached, so nothing further is assigned.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Raising the user-limit factor lets app_0 take its third container.
a.setUserLimitFactor(10);
a.assignContainers(clusterResource,node_0);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(3 * GB,a.getMetrics().getAllocatedMB());
// app_0 is satisfied; the next container goes to app_1.
a.assignContainers(clusterResource,node_0);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
// Lowering max capacity to 50% (4GB) blocks further assignment.
a.setMaxCapacity(0.5f);
a.assignContainers(clusterResource,node_0);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
// Kill all of app_0's containers: its 3GB is released.
for ( RMContainer rmContainer : app_0.getLiveContainers()) {
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
// Kill app_1's container too: the queue is fully drained.
for ( RMContainer rmContainer : app_1.getLiveContainers()) {
a.completedContainer(clusterResource,app_1,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(0 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(0 * GB,a.getMetrics().getAllocatedMB());
// With nothing allocated, available equals the queue's capacity share.
assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemory()),a.getMetrics().getAvailableMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Under the initial cluster size only two of three submitted attempts may be
// active; enlarging the cluster resource must activate the pending one too.
@Test(timeout=30000) public void testActivateApplicationByUpdatingClusterResource() throws Exception {
  LeafQueue queueE=stubLeafQueue((LeafQueue)queues.get(E));
  final String user="user_e";
  // Submit three attempts for the same user into queue E.
  final ApplicationAttemptId attemptId0=TestUtils.getMockApplicationAttemptId(0,0);
  FiCaSchedulerApp attempt0=new FiCaSchedulerApp(attemptId0,user,queueE,mock(ActiveUsersManager.class),rmContext);
  queueE.submitApplicationAttempt(attempt0,user);
  final ApplicationAttemptId attemptId1=TestUtils.getMockApplicationAttemptId(1,0);
  FiCaSchedulerApp attempt1=new FiCaSchedulerApp(attemptId1,user,queueE,mock(ActiveUsersManager.class),rmContext);
  queueE.submitApplicationAttempt(attempt1,user);
  final ApplicationAttemptId attemptId2=TestUtils.getMockApplicationAttemptId(2,0);
  FiCaSchedulerApp attempt2=new FiCaSchedulerApp(attemptId2,user,queueE,mock(ActiveUsersManager.class),rmContext);
  queueE.submitApplicationAttempt(attempt2,user);
  // The third attempt stays pending under the current cluster size.
  assertEquals(2,queueE.activeApplications.size());
  assertEquals(1,queueE.pendingApplications.size());
  // Growing the cluster resource activates the pending attempt.
  queueE.updateClusterResource(Resources.createResource(200 * 16 * GB,100 * 32));
  assertEquals(3,queueE.activeApplications.size());
  assertEquals(0,queueE.pendingApplications.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Reinitializing the queue hierarchy must propagate an updated
// node-locality-delay value to existing leaf queues.
@Test(timeout=30000) public void testNodeLocalityAfterQueueRefresh() throws Exception {
  LeafQueue queueE=stubLeafQueue((LeafQueue)queues.get(E));
  // Default delay before the refresh.
  assertEquals(40,queueE.getNodeLocalityDelay());
  // Change the config, reparse the hierarchy, and reinitialize root.
  csConf.setInt(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,60);
  Map refreshedQueues=new HashMap();
  CSQueue refreshedRoot=CapacityScheduler.parseQueue(csContext,csConf,null,CapacitySchedulerConfiguration.ROOT,refreshedQueues,queues,TestUtils.spyHook);
  queues=refreshedQueues;
  root.reinitialize(refreshedRoot,cs.getClusterResource());
  // The pre-existing leaf queue now reports the refreshed value.
  assertEquals(60,queueE.getNodeLocalityDelay());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testMaxAMResourcePerQueuePercentAfterQueueRefresh() throws Exception {
// max-AM-resource-percent should scale the active-application limit, and a
// refresh or a cluster-resource update should each be reflected immediately.
CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration();
Resource initialCluster=Resources.createResource(100 * 16 * GB,100 * 32);
CapacitySchedulerContext context=mockCSContext(conf,initialCluster);
conf.setFloat(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT,0.1f);
ParentQueue rootQueue=new ParentQueue(context,CapacitySchedulerConfiguration.ROOT,null,null);
conf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + A,80);
LeafQueue queueA=new LeafQueue(context,A,rootQueue,null);
// 10% AM share on the initial cluster size.
assertEquals(0.1f,queueA.getMaxAMResourcePerQueuePercent(),1e-3f);
assertEquals(160,queueA.getMaximumActiveApplications());
// Doubling the AM share via reinitialize doubles the activation limit.
conf.setFloat(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT,0.2f);
LeafQueue refreshedA=new LeafQueue(context,A,rootQueue,null);
queueA.reinitialize(refreshedA,initialCluster);
assertEquals(0.2f,queueA.getMaxAMResourcePerQueuePercent(),1e-3f);
assertEquals(320,queueA.getMaximumActiveApplications());
// Growing the cluster raises the limit proportionally.
Resource grownCluster=Resources.createResource(100 * 20 * GB,100 * 32);
queueA.updateClusterResource(grownCluster);
assertEquals(400,queueA.getMaximumActiveApplications());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testLocalityConstraints() throws Exception {
// Hard locality (relaxLocality=false) combined with blacklisting: containers
// must not be allocated on excluded nodes/racks, and scheduling opportunities
// must not accrue while the app is effectively unschedulable.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=spy(new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_1,user_0);
// Topology: rack_0 = {host_0_0, host_0_1}, rack_1 = {host_1_0, host_1_1}.
String host_0_0="127.0.0.1";
String rack_0="rack_0";
String host_0_1="127.0.0.2";
FiCaSchedulerNode node_0_1=TestUtils.getMockNode(host_0_1,rack_0,0,8 * GB);
String host_1_0="127.0.0.3";
String rack_1="rack_1";
FiCaSchedulerNode node_1_0=TestUtils.getMockNode(host_1_0,rack_1,0,8 * GB);
String host_1_1="127.0.0.4";
FiCaSchedulerNode node_1_1=TestUtils.getMockNode(host_1_1,rack_1,0,8 * GB);
final int numNodes=4;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// Requests: node-local on host_0_0 and host_1_0 (relax=true), plus
// rack_1 and ANY with relaxLocality=false; host_0_0 is then blacklisted.
List app_0_requests_0=new ArrayList();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,false,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,false,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(Collections.singletonList(host_0_0),null);
app_0_requests_0.clear();
// node_0_1 has no requested host and its rack has no relaxable request:
// nothing may be allocated and no scheduling opportunity may be counted.
a.assignContainers(clusterResource,node_0_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// node_1_1 is in rack_1, but the rack_1 request has relaxLocality=false.
// NOTE(review): this verify targets node_0_1 although the assignment above
// ran on node_1_1 — presumably node_1_1 was intended; harmless with never(),
// but confirm.
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// Now make rack_1 relaxable but blacklist host_1_1: still no allocation there.
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(Collections.singletonList(host_1_1),null);
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// Blacklist the whole rack_1 while removing host_1_1 from the blacklist:
// the rack-level entry must still block node_1_1.
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(Collections.singletonList(rack_1),Collections.singletonList(host_1_1));
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
// Remove the rack_1 blacklist entry; RACK_LOCAL on node_1_1 is still not
// expected here, and one resource remains required.
app_0.updateResourceRequests(app_0_requests_0);
app_0.updateBlacklist(null,Collections.singletonList(rack_1));
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_1);
verify(app_0,never()).allocate(eq(NodeType.RACK_LOCAL),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(1,app_0.getTotalRequiredResources(priority));
// Re-add non-relaxable rack_1/ANY requests; a NODE_LOCAL allocation on the
// explicitly requested host_1_0 must now succeed and drain the request.
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,false,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,false,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0_requests_0.clear();
a.assignContainers(clusterResource,node_1_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(0,app_0.getTotalRequiredResources(priority));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testStolenReservedContainer() throws Exception {
// A reservation held by app_1 on node_0 is satisfied elsewhere (node_1),
// after which the original reservation must be convertible on node_0 once
// space frees up — queue/node accounting must stay consistent throughout.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_1);
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,4 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// app_0 wants one 2GB container anywhere; app_1 wants two 4GB containers
// with host_0 / rack / ANY requests.
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory)));
ArrayList appRequests_1=new ArrayList(4);
appRequests_1.add(TestUtils.createResourceRequest(host_0,4 * GB,1,true,priority,recordFactory));
appRequests_1.add(TestUtils.createResourceRequest(DEFAULT_RACK,4 * GB,1,true,priority,recordFactory));
appRequests_1.add(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,2,true,priority,recordFactory));
app_1.updateResourceRequests(appRequests_1);
// First pass: app_0 gets 2GB on node_0.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
assertEquals(0 * GB,a.getMetrics().getAvailableMB());
// Second pass: node_0 has only 2GB free, so app_1's 4GB request becomes a
// reservation — queue "used" includes it (6GB) but node_0 still reports 2GB.
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(2 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Disable the node-locality delay so the next assignment can allocate for
// app_1 on node_1 while its node_0 reservation still stands.
doReturn(-1).when(a).getNodeLocalityDelay();
a.assignContainers(clusterResource,node_1);
assertEquals(10 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_1.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(6 * GB,a.getMetrics().getAllocatedMB());
// Kill app_0's container on node_0; the freed 2GB lets app_1's reservation
// on node_0 be fulfilled, clearing all reserved resources.
RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(8 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(8 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_0.getUsedResource().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(8 * GB,a.getMetrics().getAllocatedMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSingleQueueWithMultipleUsers() throws Exception {
// Three users share one queue; verifies how user-limit, user-limit-factor and
// max-capacity interact as containers are allocated and then killed off.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
final String user_2="user_2";
// user_0 owns app_0 and app_1; user_1 owns app_2; user_2 owns app_3.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_1,user_0);
final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0);
FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_2,user_1);
final ApplicationAttemptId appAttemptId_3=TestUtils.getMockApplicationAttemptId(3,0);
FiCaSchedulerApp app_3=new FiCaSchedulerApp(appAttemptId_3,user_2,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_3,user_2);
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
final int numNodes=1;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,10,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,10,true,priority,recordFactory)));
// Only user_0 has demand: two passes give it 2GB, all via app_0.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// With user-limit 25% and user_0 already at 2GB, no further allocation.
a.setUserLimit(25);
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// Bring in demand from user_1 (3GB) and user_2 (2x1GB); a high
// user-limit-factor lets the queue keep allocating.
app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,3 * GB,1,true,priority,recordFactory)));
app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
a.setUserLimitFactor(10);
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
// Queue already over a 50% max-capacity cap: nothing more may be assigned.
a.setMaxCapacity(0.5f);
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
// Restore capacity, drop the factor to 1: remaining allocations go to the
// least-served user (user_2 / app_3) until the node is full.
a.setMaxCapacity(1.0f);
a.setUserLimitFactor(1);
a.assignContainers(clusterResource,node_0);
assertEquals(7 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_3.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(8 * GB,a.getUsedResources().getMemory());
assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory());
// Kill each app's containers in turn and check the queue usage unwinds.
for ( RMContainer rmContainer : app_0.getLiveContainers()) {
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory());
for ( RMContainer rmContainer : app_2.getLiveContainers()) {
a.completedContainer(clusterResource,app_2,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory());
for ( RMContainer rmContainer : app_3.getLiveContainers()) {
a.completedContainer(clusterResource,app_3,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
}
assertEquals(0 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testUserLimits() throws Exception {
// Basic user-limit behaviour: user_0 (two apps) keeps allocating across
// passes while user_1's app has no outstanding requests.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_1,user_0);
final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0);
FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_2,user_1);
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,8 * GB);
final int numNodes=2;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// Only user_0's apps request resources, so only one user is "active".
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
a.setUserLimit(50);
a.setUserLimitFactor(2);
assertEquals("There should only be 1 active user!",1,a.getActiveUsersManager().getNumActiveUsers());
// Pass 1: app_0's single 2GB container lands on node_0.
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// Pass 2: first of app_1's two 1GB containers.
a.assignContainers(clusterResource,node_0);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
// Pass 3 (on node_1): app_1's second 1GB container.
a.assignContainers(clusterResource,node_1);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(2 * GB,app_1.getCurrentConsumption().getMemory());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testInheritedQueueAcls() throws IOException {
// SUBMIT_APPLICATIONS ACLs: leaf queues a and b admit the current user,
// while root, parent c and its child c1 do not.
UserGroupInformation currentUser=UserGroupInformation.getCurrentUser();
LeafQueue queueA=stubLeafQueue((LeafQueue)queues.get(A));
LeafQueue queueB=stubLeafQueue((LeafQueue)queues.get(B));
ParentQueue queueC=(ParentQueue)queues.get(C);
LeafQueue queueC1=stubLeafQueue((LeafQueue)queues.get(C1));
assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS,currentUser));
assertTrue(queueA.hasAccess(QueueACL.SUBMIT_APPLICATIONS,currentUser));
assertTrue(queueB.hasAccess(QueueACL.SUBMIT_APPLICATIONS,currentUser));
assertFalse(queueC.hasAccess(QueueACL.SUBMIT_APPLICATIONS,currentUser));
assertFalse(queueC1.hasAccess(QueueACL.SUBMIT_APPLICATIONS,currentUser));
// The per-user ACL info reported by each queue must agree with hasAccess().
assertTrue(hasQueueACL(queueA.getQueueUserAclInfo(currentUser),QueueACL.SUBMIT_APPLICATIONS));
assertTrue(hasQueueACL(queueB.getQueueUserAclInfo(currentUser),QueueACL.SUBMIT_APPLICATIONS));
assertFalse(hasQueueACL(queueC.getQueueUserAclInfo(currentUser),QueueACL.SUBMIT_APPLICATIONS));
assertFalse(hasQueueACL(queueC1.getQueueUserAclInfo(currentUser),QueueACL.SUBMIT_APPLICATIONS));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testInitializeQueue() throws Exception {
// Checks the (absolute) capacity and maximum-capacity values each queue
// exposes right after initialization.
final float delta=1e-5f;
LeafQueue queueA=stubLeafQueue((LeafQueue)queues.get(A));
assertEquals(0.085,queueA.getCapacity(),delta);
assertEquals(0.085,queueA.getAbsoluteCapacity(),delta);
assertEquals(0.2,queueA.getMaximumCapacity(),delta);
assertEquals(0.2,queueA.getAbsoluteMaximumCapacity(),delta);
LeafQueue queueB=stubLeafQueue((LeafQueue)queues.get(B));
assertEquals(0.80,queueB.getCapacity(),delta);
assertEquals(0.80,queueB.getAbsoluteCapacity(),delta);
assertEquals(0.99,queueB.getMaximumCapacity(),delta);
assertEquals(0.99,queueB.getAbsoluteMaximumCapacity(),delta);
ParentQueue queueC=(ParentQueue)queues.get(C);
assertEquals(0.015,queueC.getCapacity(),delta);
assertEquals(0.015,queueC.getAbsoluteCapacity(),delta);
assertEquals(0.1,queueC.getMaximumCapacity(),delta);
assertEquals(0.1,queueC.getAbsoluteMaximumCapacity(),delta);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testHeadroomWithMaxCap() throws Exception {
// Verifies per-app headroom when the queue's max-capacity caps the queue
// below the user limit, and that headroom recovers as users go inactive.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_1,user_0);
final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0);
FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext);
a.submitApplicationAttempt(app_2,user_1);
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,8 * GB);
final int numNodes=2;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// Initially only user_0's apps have demand.
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
a.setUserLimit(50);
a.setUserLimitFactor(2);
assertEquals("There should only be 1 active user!",1,a.getActiveUsersManager().getNumActiveUsers());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// Headroom is zero for both of user_0's apps at this point.
assertEquals(0 * GB,app_0.getHeadroom().getMemory());
assertEquals(0 * GB,app_1.getHeadroom().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_0.getHeadroom().getMemory());
assertEquals(0 * GB,app_1.getHeadroom().getMemory());
// Cap the queue at 10% of the cluster; user_1 now adds demand, making two
// active users, but the cap prevents any further allocation.
a.setMaxCapacity(.1f);
app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory)));
assertEquals(2,a.getActiveUsersManager().getNumActiveUsers());
a.assignContainers(clusterResource,node_1);
assertEquals(3 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_0.getHeadroom().getMemory());
assertEquals(0 * GB,app_1.getHeadroom().getMemory());
LOG.info("here");
// user_0's remaining demand drops to zero (app_1 asks for 0 containers),
// leaving user_1 as the only active user; app_2 regains 1GB of headroom.
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,0,true,priority,recordFactory)));
assertEquals(1,a.getActiveUsersManager().getNumActiveUsers());
a.assignContainers(clusterResource,node_1);
assertEquals(1 * GB,app_2.getHeadroom().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testLocalityScheduling() throws Exception {
// Delay scheduling: off-switch allocation is postponed until the app has
// accumulated enough missed scheduling opportunities, and node/rack-local
// allocations reset the counter.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_0,user_0);
// Three nodes, each on its own rack.
String host_0="127.0.0.1";
String rack_0="rack_0";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,rack_0,0,8 * GB);
String host_1="127.0.0.2";
String rack_1="rack_1";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,rack_1,0,8 * GB);
String host_2="127.0.0.3";
String rack_2="rack_2";
FiCaSchedulerNode node_2=TestUtils.getMockNode(host_2,rack_2,0,8 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// Locality preferences: host_0/rack_0 and host_1/rack_1, 3 containers total.
List app_0_requests_0=new ArrayList();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
CSAssignment assignment=null;
// node_2 is not a preferred host/rack: three passes only accumulate
// scheduling opportunities (1, 2, 3) without allocating.
assignment=a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(1,app_0.getSchedulingOpportunities(priority));
assertEquals(3,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.NODE_LOCAL,assignment.getType());
assignment=a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(2,app_0.getSchedulingOpportunities(priority));
assertEquals(3,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.NODE_LOCAL,assignment.getType());
assignment=a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(3,app_0.getSchedulingOpportunities(priority));
assertEquals(3,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.NODE_LOCAL,assignment.getType());
// Fourth pass: the delay is exhausted, so an OFF_SWITCH container is
// allocated on node_2 (opportunities keep counting for OFF_SWITCH).
assignment=a.assignContainers(clusterResource,node_2);
verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(4,app_0.getSchedulingOpportunities(priority));
assertEquals(2,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.OFF_SWITCH,assignment.getType());
// NODE_LOCAL allocations on the preferred hosts reset the counter to 0.
assignment=a.assignContainers(clusterResource,node_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(1,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.NODE_LOCAL,assignment.getType());
assignment=a.assignContainers(clusterResource,node_1);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(0,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.NODE_LOCAL,assignment.getType());
// New request preferring host_1/rack_1; node_3 shares rack_1 but is not
// the requested host.
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
assertEquals(2,app_0.getTotalRequiredResources(priority));
String host_3="127.0.0.4";
FiCaSchedulerNode node_3=TestUtils.getMockNode(host_3,rack_1,0,8 * GB);
// With a node-locality delay of 1, the first pass on node_3 is skipped,
// the second allocates RACK_LOCAL and resets the counter.
doReturn(1).when(a).getNodeLocalityDelay();
assignment=a.assignContainers(clusterResource,node_3);
assertEquals(1,app_0.getSchedulingOpportunities(priority));
assertEquals(2,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.NODE_LOCAL,assignment.getType());
assignment=a.assignContainers(clusterResource,node_3);
verify(app_0).allocate(eq(NodeType.RACK_LOCAL),eq(node_3),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(1,app_0.getTotalRequiredResources(priority));
assertEquals(NodeType.RACK_LOCAL,assignment.getType());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testAppAttemptMetrics() throws Exception {
// Queue metrics across the attempt lifecycle: a FAILED first attempt must
// not count as a failed app, and a FINISHED removal must move the app from
// pending to completed.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(B));
final String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,1);
AppAddedSchedulerEvent addAppEvent=new AppAddedSchedulerEvent(appAttemptId_0.getApplicationId(),a.getQueueName(),user_0);
cs.handle(addAppEvent);
AppAttemptAddedSchedulerEvent addAttemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId_0,false);
cs.handle(addAttemptEvent);
// Fail attempt 1: neither pending nor failed counters may be left non-zero.
AppAttemptRemovedSchedulerEvent event=new AppAttemptRemovedSchedulerEvent(appAttemptId_0,RMAppAttemptState.FAILED,false);
cs.handle(event);
assertEquals(0,a.getMetrics().getAppsPending());
assertEquals(0,a.getMetrics().getAppsFailed());
// Re-attempt the same application (same app id 0, attempt 2).
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(0,2);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,null,rmContext);
a.submitApplicationAttempt(app_1,user_0);
assertEquals(1,a.getMetrics().getAppsSubmitted());
assertEquals(1,a.getMetrics().getAppsPending());
// Finish the attempt and remove the application.
// NOTE(review): this removal reuses appAttemptId_0 (attempt 1) rather than
// appAttemptId_1; both share application id 0 — confirm which attempt id is
// intended here.
event=new AppAttemptRemovedSchedulerEvent(appAttemptId_0,RMAppAttemptState.FINISHED,false);
cs.handle(event);
AppRemovedSchedulerEvent rEvent=new AppRemovedSchedulerEvent(appAttemptId_0.getApplicationId(),RMAppState.FINISHED);
cs.handle(rEvent);
assertEquals(1,a.getMetrics().getAppsSubmitted());
assertEquals(0,a.getMetrics().getAppsPending());
assertEquals(0,a.getMetrics().getAppsFailed());
assertEquals(1,a.getMetrics().getAppsCompleted());
// The per-user metrics view must also record the submission.
QueueMetrics userMetrics=a.getMetrics().getUserMetrics(user_0);
assertEquals(1,userMetrics.getAppsSubmitted());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testApplicationPriorityScheduling() throws Exception {
// Verifies that scheduling opportunities and required-resource counts are
// tracked independently per priority: priority_1 asks for 1GB containers
// with node/rack locality preferences, priority_2 asks for 2GB containers,
// and the higher priority (lower number) is served first.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
// spy() so the allocate() calls made during assignContainers can be verified.
FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_0,user_0);
// Three 8GB nodes, each on its own rack.
String host_0="127.0.0.1";
String rack_0="rack_0";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,rack_0,0,8 * GB);
String host_1="127.0.0.2";
String rack_1="rack_1";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,rack_1,0,8 * GB);
String host_2="127.0.0.3";
String rack_2="rack_2";
FiCaSchedulerNode node_2=TestUtils.getMockNode(host_2,rack_2,0,8 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
List app_0_requests_0=new ArrayList();
// priority_1: two 1GB containers, locality preferences on node_0/node_1.
Priority priority_1=TestUtils.createMockPriority(1);
app_0_requests_0.add(TestUtils.createResourceRequest(host_0,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority_1,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority_1,recordFactory));
// priority_2: one 2GB container, locality preference on node_2.
Priority priority_2=TestUtils.createMockPriority(2);
app_0_requests_0.add(TestUtils.createResourceRequest(host_2,2 * GB,1,true,priority_2,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_2,2 * GB,1,true,priority_2,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority_2,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// First pass on node_2: priority_1 has no local preference there, so nothing
// is allocated; its scheduling-opportunity count climbs while priority_2
// waits behind it untouched.
a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(1,app_0.getSchedulingOpportunities(priority_1));
assertEquals(2,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// Second pass on node_2: still no allocation, opportunities climb to 2.
a.assignContainers(clusterResource,node_2);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(2,app_0.getSchedulingOpportunities(priority_1));
assertEquals(2,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// Third pass: the locality delay is exhausted, so priority_1 is allocated
// OFF_SWITCH on node_2; priority_2 is still held back.
a.assignContainers(clusterResource,node_2);
verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(3,app_0.getSchedulingOpportunities(priority_1));
assertEquals(1,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// node_0 matches a priority_1 host preference: NODE_LOCAL allocation
// completes priority_1 and resets its scheduling opportunities.
a.assignContainers(clusterResource,node_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_1));
assertEquals(0,app_0.getTotalRequiredResources(priority_1));
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
assertEquals(1,app_0.getTotalRequiredResources(priority_2));
// With priority_1 satisfied, priority_2 is finally served (OFF_SWITCH on
// node_1, which is not its preferred host/rack).
a.assignContainers(clusterResource,node_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1),eq(priority_1),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority_1));
assertEquals(0,app_0.getTotalRequiredResources(priority_1));
verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_1),eq(priority_2),any(ResourceRequest.class),any(Container.class));
assertEquals(1,app_0.getSchedulingOpportunities(priority_2));
assertEquals(0,app_0.getTotalRequiredResources(priority_2));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testReservationExchange() throws Exception {
// Verifies that a 4GB reservation made for app_1 on node_0 is eventually
// satisfied on a different node (node_1), after which the now-excess
// reservation on node_0 is surfaced via the assignment for unreservation.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
a.setUserLimitFactor(10);
final String user_0="user_0";
final String user_1="user_1";
// app_0 (user_0) asks for small containers; app_1 (user_1) asks for one big one.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_1);
// Two 4GB nodes (a third node is counted in clusterResource but not mocked).
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
String host_1="127.0.0.2";
FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,4 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(4 * GB,16));
when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4 * GB,16));
when(a.getMinimumAllocationFactor()).thenReturn(0.25f);
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,1,true,priority,recordFactory)));
// app_0 gets its two 1GB containers on node_0 over the first two passes.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
// node_0 lacks 4GB free, so app_1's request becomes a reservation; queue
// "used" includes the reserved 4GB even though the node holds only 2GB.
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(2 * GB,node_0.getUsedResource().getMemory());
// Kill one of app_0's containers; node_0 still can't fit 4GB, so app_1
// re-reserves instead of allocating.
RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(1 * GB,node_0.getUsedResource().getMemory());
assertEquals(1,app_1.getReReservations(priority));
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(1 * GB,node_0.getUsedResource().getMemory());
assertEquals(2,app_1.getReReservations(priority));
// node_1 has 4GB free, so app_1's container is allocated there; note the
// stale reservation on node_0 is still counted at this point.
a.assignContainers(clusterResource,node_1);
assertEquals(9 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_1.getUsedResource().getMemory());
assertEquals(2,app_1.getReReservations(priority));
// Remove app_0's last container; the next pass on node_0 reports the 4GB
// reservation as excess so the scheduler can unreserve it.
rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
CSAssignment assignment=a.assignContainers(clusterResource,node_0);
assertEquals(8 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(0 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,assignment.getExcessReservation().getContainer().getResource().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testSchedulingConstraints() throws Exception {
// Verifies host-level scheduling constraints: a request with specific host
// preferences must only be allocated on one of those hosts, with rack and
// ANY entries gating which nodes are eligible.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
String user_0="user_0";
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
// spy() so allocate() invocations can be verified below.
FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
a.submitApplicationAttempt(app_0,user_0);
// Two hosts on rack_0 and one host on rack_1, 8GB each.
String host_0_0="127.0.0.1";
String rack_0="rack_0";
FiCaSchedulerNode node_0_0=TestUtils.getMockNode(host_0_0,rack_0,0,8 * GB);
String host_0_1="127.0.0.2";
FiCaSchedulerNode node_0_1=TestUtils.getMockNode(host_0_1,rack_0,0,8 * GB);
String host_1_0="127.0.0.3";
String rack_1="rack_1";
FiCaSchedulerNode node_1_0=TestUtils.getMockNode(host_1_0,rack_1,0,8 * GB);
final int numNodes=3;
Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
// Ask for one container, preferring any of the three hosts / both racks,
// then cap it with an ANY request of 1.
List app_0_requests_0=new ArrayList();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_0_1,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0,1 * GB,1,true,priority,recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// node_0_0 is a preferred host: NODE_LOCAL allocation, request satisfied.
a.assignContainers(clusterResource,node_0_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(0,app_0.getTotalRequiredResources(priority));
// With the ANY count exhausted, node_1_0 gets nothing.
a.assignContainers(clusterResource,node_1_0);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(0,app_0.getTotalRequiredResources(priority));
// Re-arm the ANY request with one more container.
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// node_0_1 is on rack_0 but its host request was consumed above, so no
// allocation happens; a scheduling opportunity is recorded instead.
a.assignContainers(clusterResource,node_0_1);
verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(1,app_0.getSchedulingOpportunities(priority));
assertEquals(1,app_0.getTotalRequiredResources(priority));
// node_1_0 still has an outstanding host request: NODE_LOCAL allocation.
a.assignContainers(clusterResource,node_1_0);
verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
assertEquals(0,app_0.getSchedulingOpportunities(priority));
assertEquals(0,app_0.getTotalRequiredResources(priority));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testReservation() throws Exception {
// Verifies that a reservation for a large container is reflected in queue
// usage and metrics (reserved vs allocated MB), and is converted into a
// real allocation once enough space frees up on the node.
LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
a.setMaxCapacity(1.0f);
final String user_0="user_0";
final String user_1="user_1";
// app_0 (user_0) wants two 1GB containers; app_1 (user_1) wants one 4GB.
final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_0,user_0);
final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
a.submitApplicationAttempt(app_1,user_1);
// Single 4GB node; cluster counts two nodes.
String host_0="127.0.0.1";
FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
final int numNodes=2;
Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
Priority priority=TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,1,true,priority,recordFactory)));
// First two passes allocate app_0's 1GB containers; nothing reserved yet.
a.assignContainers(clusterResource,node_0);
assertEquals(1 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
assertEquals(0 * GB,a.getMetrics().getAvailableMB());
a.assignContainers(clusterResource,node_0);
assertEquals(2 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Only 2GB free on node_0, so app_1's 4GB request becomes a reservation;
// queue usage includes it and it shows up as reserved (not allocated) MB.
a.assignContainers(clusterResource,node_0);
assertEquals(6 * GB,a.getUsedResources().getMemory());
assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(2 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
// Kill one of app_0's containers; still not enough room, reservation holds.
RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(5 * GB,a.getUsedResources().getMemory());
assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(1 * GB,node_0.getUsedResource().getMemory());
assertEquals(4 * GB,a.getMetrics().getReservedMB());
assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
// Kill app_0's last container: node_0 is fully free, so the reservation is
// fulfilled and the 4GB moves from reserved to allocated.
rmContainer=app_0.getLiveContainers().iterator().next();
a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
a.assignContainers(clusterResource,node_0);
assertEquals(4 * GB,a.getUsedResources().getMemory());
assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
assertEquals(0 * GB,app_1.getCurrentReservation().getMemory());
assertEquals(4 * GB,node_0.getUsedResource().getMemory());
assertEquals(0 * GB,a.getMetrics().getReservedMB());
assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test(timeout=30000) public void testActivateApplicationAfterQueueRefresh() throws Exception {
  // Queue E admits only two active application attempts with the default
  // AM-resource percent (the asserts below show the third is held pending).
  final LeafQueue queueE = stubLeafQueue((LeafQueue) queues.get(E));
  final String user = "user_e";

  // Submit three attempts; the third should remain in the pending list.
  for (int i = 0; i < 3; i++) {
    ApplicationAttemptId attemptId = TestUtils.getMockApplicationAttemptId(i, 0);
    FiCaSchedulerApp app = new FiCaSchedulerApp(attemptId, user, queueE, mock(ActiveUsersManager.class), rmContext);
    queueE.submitApplicationAttempt(app, user);
  }
  assertEquals(2, queueE.activeApplications.size());
  assertEquals(1, queueE.pendingApplications.size());

  // Double the allowed AM-resource percent and reinitialize the hierarchy;
  // the refresh should activate the previously pending attempt.
  csConf.setDouble(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT * 2);
  Map newQueues = new HashMap();
  CSQueue newRoot = CapacityScheduler.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, newQueues, queues, TestUtils.spyHook);
  queues = newQueues;
  root.reinitialize(newRoot, cs.getClusterResource());

  assertEquals(3, queueE.activeApplications.size());
  assertEquals(0, queueE.pendingApplications.size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
@Test public void testQueueAcl() throws Exception {
// Verifies ACL inheritance down a multi-level queue hierarchy: root denies
// everything (" " = nobody), C opens ADMINISTER_QUEUE and C11 opens
// SUBMIT_APPLICATIONS; descendants inherit their ancestors' grants.
setupMultiLevelQueues(csConf);
csConf.setAcl(CapacitySchedulerConfiguration.ROOT,QueueACL.SUBMIT_APPLICATIONS," ");
csConf.setAcl(CapacitySchedulerConfiguration.ROOT,QueueACL.ADMINISTER_QUEUE," ");
final String Q_C=CapacitySchedulerConfiguration.ROOT + "." + C;
csConf.setAcl(Q_C,QueueACL.ADMINISTER_QUEUE,"*");
final String Q_C11=Q_C + "." + C1+ "."+ C11;
csConf.setAcl(Q_C11,QueueACL.SUBMIT_APPLICATIONS,"*");
// Build a fresh hierarchy from the configuration (locals shadow the
// test-class fields of the same names on purpose).
Map queues=new HashMap();
CSQueue root=CapacityScheduler.parseQueue(csContext,csConf,null,CapacitySchedulerConfiguration.ROOT,queues,queues,TestUtils.spyHook);
UserGroupInformation user=UserGroupInformation.getCurrentUser();
ParentQueue c=(ParentQueue)queues.get(C);
ParentQueue c1=(ParentQueue)queues.get(C1);
ParentQueue c11=(ParentQueue)queues.get(C11);
ParentQueue c111=(ParentQueue)queues.get(C111);
// root: both ACLs denied, and the user-ACL listing agrees.
assertFalse(root.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
List aclInfos=root.getQueueUserAclInfo(user);
assertFalse(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"root"));
assertFalse(root.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"root"));
// c and c1: ADMINISTER_QUEUE granted (set on c, inherited by c1);
// SUBMIT_APPLICATIONS still denied.
assertTrue(c.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c"));
assertFalse(c.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c"));
assertTrue(c1.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c1"));
assertFalse(c1.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertFalse(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c1"));
// c11 and c111: both ACLs granted (SUBMIT set on c11, inherited by c111).
assertTrue(c11.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c11"));
assertTrue(c11.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c11"));
assertTrue(c111.hasAccess(QueueACL.ADMINISTER_QUEUE,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.ADMINISTER_QUEUE,"c111"));
assertTrue(c111.hasAccess(QueueACL.SUBMIT_APPLICATIONS,user));
assertTrue(hasQueueACL(aclInfos,QueueACL.SUBMIT_APPLICATIONS,"c111"));
// Reset the spy so recorded interactions don't leak into other tests.
reset(c);
}
InternalCallVerifier EqualityVerifier
@Test public void testQueueParsing() throws Exception {
  // Build a CapacityScheduler from the standard test queue configuration
  // and verify absolute (maximum) capacities computed down the hierarchy.
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  setupQueueConfiguration(csConf);
  YarnConfiguration conf = new YarnConfiguration(csConf);

  CapacityScheduler scheduler = new CapacityScheduler();
  RMContextImpl context = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), null);
  scheduler.setConf(conf);
  scheduler.setRMContext(context);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, context);

  // "a" is a direct child of root.
  CSQueue queueA = scheduler.getQueue("a");
  Assert.assertEquals(0.10, queueA.getAbsoluteCapacity(), DELTA);
  Assert.assertEquals(0.15, queueA.getAbsoluteMaximumCapacity(), DELTA);

  // "b1" is nested one level; capacities multiply down the chain.
  CSQueue queueB1 = scheduler.getQueue("b1");
  Assert.assertEquals(0.2 * 0.5, queueB1.getAbsoluteCapacity(), DELTA);
  Assert.assertEquals("Parent B has no MAX_CAP", 0.85, queueB1.getAbsoluteMaximumCapacity(), DELTA);

  // "c12" is nested two levels deep.
  CSQueue queueC12 = scheduler.getQueue("c12");
  Assert.assertEquals(0.7 * 0.5 * 0.45, queueC12.getAbsoluteCapacity(), DELTA);
  Assert.assertEquals(0.7 * 0.55 * 0.7, queueC12.getAbsoluteMaximumCapacity(), DELTA);

  scheduler.stop();
}
InternalCallVerifier EqualityVerifier PublicFieldVerifier
@Test public void testBackwardsCompatibleAllocationFileParsing() throws Exception {
  // Writes a legacy-format allocation file, reloads it, and checks queue
  // min-resources, max-apps, per-user limits, ACLs and preemption timeouts.
  // NOTE(review): the println literals below appear to have lost their XML
  // markup (they should form an <allocations> document) — confirm against
  // the original fixture before relying on this test's file contents.
  Configuration conf = new Configuration();
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  // try/finally so the writer is closed even if a println throws; the
  // original leaked the writer (and the open file handle) on that path.
  try {
    out.println("");
    out.println("");
    out.println("");
    out.println("1024mb,0vcores ");
    out.println(" ");
    out.println("");
    out.println("2048mb,0vcores ");
    out.println("alice,bob admins ");
    out.println(" ");
    out.println("");
    out.println("alice,bob admins ");
    out.println(" ");
    out.println("");
    out.println("3 ");
    out.println(" ");
    out.println("");
    out.println("60 ");
    out.println(" ");
    out.println("15 ");
    out.println("5 ");
    out.println("");
    out.println("10 ");
    out.println(" ");
    out.println("120" + " ");
    out.println("300 ");
    out.println(" ");
  } finally {
    out.close();
  }
  allocLoader.init(conf);
  ReloadListener confHolder = new ReloadListener();
  allocLoader.setReloadListener(confHolder);
  allocLoader.reloadAllocations();
  AllocationConfiguration queueConf = confHolder.allocConf;

  // Five leaf queues: default, queueA..queueE.
  assertEquals(5, queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
  // Min resources: only queueA/queueB declare them; everything else is 0.
  // (The original asserted the default queue twice; the duplicate is removed.)
  assertEquals(Resources.createResource(0), queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
  assertEquals(Resources.createResource(1024, 0), queueConf.getMinResources("root.queueA"));
  assertEquals(Resources.createResource(2048, 0), queueConf.getMinResources("root.queueB"));
  assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueC"));
  assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueD"));
  assertEquals(Resources.createResource(0), queueConf.getMinResources("root.queueE"));

  // Max running apps: queue default is 15, queueD overrides with 3.
  assertEquals(15, queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
  assertEquals(15, queueConf.getQueueMaxApps("root.queueA"));
  assertEquals(15, queueConf.getQueueMaxApps("root.queueB"));
  assertEquals(15, queueConf.getQueueMaxApps("root.queueC"));
  assertEquals(3, queueConf.getQueueMaxApps("root.queueD"));
  assertEquals(15, queueConf.getQueueMaxApps("root.queueE"));
  // Per-user limits: user default is 5, user1 overrides with 10.
  assertEquals(10, queueConf.getUserMaxApps("user1"));
  assertEquals(5, queueConf.getUserMaxApps("user2"));

  // ACLs: unset ACLs resolve to " " (nobody); queueB/queueC set them.
  assertEquals(" ", queueConf.getQueueAcl("root.queueA", QueueACL.ADMINISTER_QUEUE).getAclString());
  assertEquals(" ", queueConf.getQueueAcl("root.queueA", QueueACL.SUBMIT_APPLICATIONS).getAclString());
  assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueB", QueueACL.ADMINISTER_QUEUE).getAclString());
  assertEquals("alice,bob admins", queueConf.getQueueAcl("root.queueC", QueueACL.SUBMIT_APPLICATIONS).getAclString());

  // Preemption: default min-share timeout is 120s; queueE overrides with 60s.
  // (The original asserted root.queueA twice; the duplicate is removed.)
  assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
  assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueA"));
  assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueB"));
  assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueC"));
  assertEquals(120000, queueConf.getMinSharePreemptionTimeout("root.queueD"));
  assertEquals(60000, queueConf.getMinSharePreemptionTimeout("root.queueE"));
  assertEquals(300000, queueConf.getFairSharePreemptionTimeout());
}
InternalCallVerifier EqualityVerifier PublicFieldVerifier
@Test public void testSimplePlacementPolicyFromConf() throws Exception {
  // With undeclared pools and user-as-default-queue both disabled, the
  // loaded placement policy should contain exactly two rules:
  // "specified" (with create=false) followed by "default".
  Configuration conf = new Configuration();
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
  conf.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS, false);
  conf.setBoolean(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, false);
  PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
  // try/finally so the writer is closed even if a println throws; the
  // original leaked the writer on that path.
  try {
    out.println("");
    out.println("");
    out.println(" ");
  } finally {
    out.close();
  }
  AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
  allocLoader.init(conf);
  ReloadListener confHolder = new ReloadListener();
  allocLoader.setReloadListener(confHolder);
  allocLoader.reloadAllocations();
  AllocationConfiguration allocConf = confHolder.allocConf;
  QueuePlacementPolicy placementPolicy = allocConf.getPlacementPolicy();
  List rules = placementPolicy.getRules();
  assertEquals(2, rules.size());
  assertEquals(QueuePlacementRule.Specified.class, rules.get(0).getClass());
  // The raw List yields Object, so an explicit cast is required to reach
  // the rule's public "create" field (the original would not compile).
  assertFalse(((QueuePlacementRule) rules.get(0)).create);
  assertEquals(QueuePlacementRule.Default.class, rules.get(1).getClass());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
@Test public void testAllocationFileParsing() throws Exception {
// Writes a current-format allocation file and verifies everything the
// loader parses: min resources, max apps, maxAMShare, ACLs, preemption
// timeouts, parent/leaf queue classification, and scheduling policies.
// NOTE(review): the println literals below appear to have lost their XML
// markup (they should form an <allocations> document) — confirm against
// the original fixture.
Configuration conf=new Configuration();
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
AllocationFileLoaderService allocLoader=new AllocationFileLoaderService();
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println("2048mb,0vcores ");
out.println("alice,bob admins ");
out.println("fair ");
out.println(" ");
out.println("");
out.println("alice,bob admins ");
out.println(" ");
out.println("");
out.println("3 ");
out.println("0.4 ");
out.println(" ");
out.println("");
out.println("60 ");
out.println(" ");
out.println("");
out.println(" ");
out.println("");
out.println(" ");
out.println(" ");
out.println(" ");
out.println("15 ");
out.println("5 ");
out.println("0.5f ");
out.println("");
out.println("10 ");
out.println(" ");
out.println("120" + " ");
out.println("300 ");
out.println("drf ");
out.println(" ");
out.close();
allocLoader.init(conf);
ReloadListener confHolder=new ReloadListener();
allocLoader.setReloadListener(confHolder);
allocLoader.reloadAllocations();
AllocationConfiguration queueConf=confHolder.allocConf;
// Six leaf queues: default, queueA..queueE plus the nested leaf under queueG.
assertEquals(6,queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
// Min resources: only queueA/queueB declare them; everything else is 0.
// NOTE(review): the next two assertions are byte-identical duplicates.
assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(Resources.createResource(1024,0),queueConf.getMinResources("root.queueA"));
assertEquals(Resources.createResource(2048,0),queueConf.getMinResources("root.queueB"));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueC"));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueD"));
assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueE"));
// Max running apps: queue default is 15, queueD overrides with 3.
assertEquals(15,queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(15,queueConf.getQueueMaxApps("root.queueA"));
assertEquals(15,queueConf.getQueueMaxApps("root.queueB"));
assertEquals(15,queueConf.getQueueMaxApps("root.queueC"));
assertEquals(3,queueConf.getQueueMaxApps("root.queueD"));
assertEquals(15,queueConf.getQueueMaxApps("root.queueE"));
// Per-user limits: user default is 5, user1 overrides with 10.
assertEquals(10,queueConf.getUserMaxApps("user1"));
assertEquals(5,queueConf.getUserMaxApps("user2"));
// maxAMShare: default 0.5, queueD overrides with 0.4.
assertEquals(.5f,queueConf.getQueueMaxAMShare("root." + YarnConfiguration.DEFAULT_QUEUE_NAME),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueA"),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueB"),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueC"),0.01);
assertEquals(.4f,queueConf.getQueueMaxAMShare("root.queueD"),0.01);
assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueE"),0.01);
// ACLs: root is wide open ("*"); unset queue ACLs resolve to " " (nobody).
assertEquals("*",queueConf.getQueueAcl("root",QueueACL.ADMINISTER_QUEUE).getAclString());
assertEquals("*",queueConf.getQueueAcl("root",QueueACL.SUBMIT_APPLICATIONS).getAclString());
assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.ADMINISTER_QUEUE).getAclString());
assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.SUBMIT_APPLICATIONS).getAclString());
assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueB",QueueACL.ADMINISTER_QUEUE).getAclString());
assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueC",QueueACL.SUBMIT_APPLICATIONS).getAclString());
// Preemption: default min-share timeout is 120s; queueE overrides with 60s.
// NOTE(review): root.queueA is asserted twice below (duplicate).
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueB"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueC"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueD"));
assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA"));
assertEquals(60000,queueConf.getMinSharePreemptionTimeout("root.queueE"));
assertEquals(300000,queueConf.getFairSharePreemptionTimeout());
// Queue classification: queueF/queueG are parents, queueG.queueH is a leaf.
assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueF"));
assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueG"));
assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueG.queueH"));
// Scheduling policy: default is DRF; queueB overrides with fair share;
// even a queue not in the file (root.newqueue) gets the default.
assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root").getName());
assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.queueA").getName());
assertEquals(FairSharePolicy.NAME,queueConf.getSchedulingPolicy("root.queueB").getName());
assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.newqueue").getName());
}
InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
/**
 * Verifies hot-reload of the scheduler allocation file: load an initial
 * configuration, overwrite the file, advance the mock clock past the
 * reload wait window, and poll until the reload listener receives the
 * new configuration.
 *
 * Fix: the background reloader thread started by allocLoader.start()
 * was never stopped, leaking a live thread into subsequent tests; the
 * start/poll section is now wrapped in try/finally with stop().
 */
@Test(timeout=10000) public void testReload() throws Exception {
  // Write the first version of the allocation file. (NOTE(review): the
  // XML payload of these println calls appears stripped in this source;
  // strings are preserved exactly as found.)
  PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("");
  out.println("");
  out.println(" ");
  out.println(" 1 ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.close();
  MockClock clock=new MockClock();
  Configuration conf=new Configuration();
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
  AllocationFileLoaderService allocLoader=new AllocationFileLoaderService(clock);
  allocLoader.reloadIntervalMs=5;
  allocLoader.init(conf);
  ReloadListener confHolder=new ReloadListener();
  allocLoader.setReloadListener(confHolder);
  // Synchronous first load.
  allocLoader.reloadAllocations();
  AllocationConfiguration allocConf=confHolder.allocConf;
  // Verify the initial configuration: one Default placement rule,
  // queueA max-apps of 1, and two configured leaf queues.
  QueuePlacementPolicy policy=allocConf.getPlacementPolicy();
  List rules=policy.getRules();
  assertEquals(1,rules.size());
  assertEquals(QueuePlacementRule.Default.class,rules.get(0).getClass());
  assertEquals(1,allocConf.getQueueMaxApps("root.queueA"));
  assertEquals(2,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
  assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueA"));
  assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB"));
  // Clear the holder so we can detect the asynchronous reload below.
  confHolder.allocConf=null;
  // Overwrite the allocation file with a second version.
  out=new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("");
  out.println("");
  out.println(" ");
  out.println(" 3 ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.close();
  // Jump the mock clock well past the reload wait window so the
  // reloader considers the rewritten file "old enough" to pick up.
  clock.tick(System.currentTimeMillis() + AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 10000);
  allocLoader.start();
  try {
    // Poll until the background reloader delivers the new config;
    // bounded by the @Test timeout.
    while (confHolder.allocConf == null) {
      Thread.sleep(20);
    }
    allocConf=confHolder.allocConf;
    // Verify the reloaded configuration: three placement rules in
    // order (Specified, NestedUserQueue wrapping PrimaryGroup, Default),
    // queueB max-apps of 3, and a single configured leaf queue.
    policy=allocConf.getPlacementPolicy();
    rules=policy.getRules();
    assertEquals(3,rules.size());
    assertEquals(QueuePlacementRule.Specified.class,rules.get(0).getClass());
    assertEquals(QueuePlacementRule.NestedUserQueue.class,rules.get(1).getClass());
    assertEquals(QueuePlacementRule.PrimaryGroup.class,((NestedUserQueue)(rules.get(1))).nestedRule.getClass());
    assertEquals(QueuePlacementRule.Default.class,rules.get(2).getClass());
    assertEquals(3,allocConf.getQueueMaxApps("root.queueB"));
    assertEquals(1,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
    assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB"));
  } finally {
    // Stop the reloader thread so it cannot leak into other tests.
    allocLoader.stop();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * The allocation file property may name a classpath resource rather than
 * a filesystem path; getAllocationFile must resolve it to an existing file.
 */
@Test public void testGetAllocationFileFromClasspath(){
  Configuration config=new Configuration();
  config.set(FairSchedulerConfiguration.ALLOCATION_FILE,"test-fair-scheduler.xml");
  AllocationFileLoaderService loaderService=new AllocationFileLoaderService();
  File resolved=loaderService.getAllocationFile(config);
  assertTrue(resolved.exists());
  assertEquals("test-fair-scheduler.xml",resolved.getName());
}
IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * Delay scheduling driven by missed scheduling opportunities: the app
 * stays node-local through the first five misses (threshold 0.5 of 10
 * nodes), relaxes to rack-local on the sixth, and eventually to
 * off-switch once the rack threshold (0.6) is exceeded as well. A reset
 * restores node-local behavior.
 */
@Test public void testDelayScheduling(){
  FSLeafQueue mockQueue=Mockito.mock(FSLeafQueue.class);
  Priority priority=Mockito.mock(Priority.class);
  Mockito.when(priority.getPriority()).thenReturn(1);
  final double nodeThreshold=.5;
  final double rackThreshold=.6;
  ApplicationAttemptId attemptId=createAppAttemptId(1,1);
  RMContext ctx=resourceManager.getRMContext();
  FSAppAttempt attempt=new FSAppAttempt(scheduler,attemptId,"user1",mockQueue,null,ctx);
  // No missed opportunities yet: node-local allowed.
  assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  // Five misses stay within the node-local window.
  for (int miss=1; miss <= 5; miss++) {
    attempt.addSchedulingOpportunity(priority);
    assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  }
  // The sixth miss crosses the node threshold: relax to rack-local.
  attempt.addSchedulingOpportunity(priority);
  assertEquals(NodeType.RACK_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  // Resetting the level and the opportunity count starts over.
  attempt.resetAllowedLocalityLevel(priority,NodeType.NODE_LOCAL);
  attempt.resetSchedulingOpportunities(priority);
  assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  for (int miss=1; miss <= 5; miss++) {
    attempt.addSchedulingOpportunity(priority);
    assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  }
  attempt.addSchedulingOpportunity(priority);
  assertEquals(NodeType.RACK_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  // Six more misses stay rack-local; the one after that crosses the
  // rack threshold and relaxes to off-switch.
  for (int miss=1; miss <= 6; miss++) {
    attempt.addSchedulingOpportunity(priority);
    assertEquals(NodeType.RACK_LOCAL,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
  }
  attempt.addSchedulingOpportunity(priority);
  assertEquals(NodeType.OFF_SWITCH,attempt.getAllowedLocalityLevel(priority,10,nodeThreshold,rackThreshold));
}
InternalCallVerifier EqualityVerifier
/**
 * Negative locality thresholds disable delay scheduling entirely, so a
 * brand-new app may immediately schedule off-switch.
 */
@Test public void testLocalityLevelWithoutDelays(){
  FSLeafQueue mockQueue=Mockito.mock(FSLeafQueue.class);
  Priority priority=Mockito.mock(Priority.class);
  Mockito.when(priority.getPriority()).thenReturn(1);
  ApplicationAttemptId attemptId=createAppAttemptId(1,1);
  RMContext ctx=resourceManager.getRMContext();
  FSAppAttempt attempt=new FSAppAttempt(scheduler,attemptId,"user1",mockQueue,null,ctx);
  assertEquals(NodeType.OFF_SWITCH,attempt.getAllowedLocalityLevel(priority,10,-1.0,-1.0));
}
InternalCallVerifier EqualityVerifier
/**
 * Continuous-scheduling variant of delay scheduling: the locality level
 * is relaxed based on elapsed mock-clock time rather than missed
 * scheduling opportunities, and a reset restarts the time window.
 */
@Test public void testDelaySchedulingForContinuousScheduling() throws InterruptedException {
  FSLeafQueue leaf=scheduler.getQueueManager().getLeafQueue("queue",true);
  Priority priority=Mockito.mock(Priority.class);
  Mockito.when(priority.getPriority()).thenReturn(1);
  MockClock mockClock=new MockClock();
  scheduler.setClock(mockClock);
  final long nodeDelayMs=5 * 1000L;
  final long rackDelayMs=6 * 1000L;
  RMContext ctx=resourceManager.getRMContext();
  ApplicationAttemptId attemptId=createAppAttemptId(1,1);
  FSAppAttempt attempt=new FSAppAttempt(scheduler,attemptId,"user1",leaf,null,ctx);
  // Still inside the node-local delay window.
  assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevelByTime(priority,nodeDelayMs,rackDelayMs,mockClock.getTime()));
  mockClock.tick(4);
  assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevelByTime(priority,nodeDelayMs,rackDelayMs,mockClock.getTime()));
  mockClock.tick(2);
  // Past the node-local delay: relaxed to rack-local.
  assertEquals(NodeType.RACK_LOCAL,attempt.getAllowedLocalityLevelByTime(priority,nodeDelayMs,rackDelayMs,mockClock.getTime()));
  // Resetting level and opportunity timestamp restarts the window.
  attempt.resetAllowedLocalityLevel(priority,NodeType.NODE_LOCAL);
  attempt.resetSchedulingOpportunities(priority,mockClock.getTime());
  assertEquals(NodeType.NODE_LOCAL,attempt.getAllowedLocalityLevelByTime(priority,nodeDelayMs,rackDelayMs,mockClock.getTime()));
  mockClock.tick(6);
  assertEquals(NodeType.RACK_LOCAL,attempt.getAllowedLocalityLevelByTime(priority,nodeDelayMs,rackDelayMs,mockClock.getTime()));
  mockClock.tick(7);
  // Past the rack delay as well: off-switch allowed.
  assertEquals(NodeType.OFF_SWITCH,attempt.getAllowedLocalityLevelByTime(priority,nodeDelayMs,rackDelayMs,mockClock.getTime()));
}
InternalCallVerifier BooleanVerifier
/**
 * Queue demand must be capped at the configured maximum even when the
 * apps in the queue collectively demand more.
 */
@Test public void testUpdateDemand(){
  FSAppAttempt mockApp=mock(FSAppAttempt.class);
  Mockito.when(mockApp.getDemand()).thenReturn(maxResource);
  // Two apps each demanding maxResource would sum past the cap.
  schedulable.addAppSchedulable(mockApp);
  schedulable.addAppSchedulable(mockApp);
  schedulable.updateDemand();
  assertTrue("Demand is greater than max allowed ",Resources.equals(schedulable.getDemand(),maxResource));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * DRF within a single queue: a memory-heavy app (2048MB, 1 vcore per
 * container) and a cpu-heavy app (1024MB, 2 vcores) alternate container
 * assignments according to their dominant shares, as pinned by the
 * per-round assertions below.
 */
@Test public void testBasicDRFAssignment() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  RMNode singleNode=MockNodes.newNodeInfo(1,BuilderUtils.newResource(8192,5));
  scheduler.handle(new NodeAddedSchedulerEvent(singleNode));
  ApplicationAttemptId memHeavyAttId=createSchedulingRequest(2048,1,"queue1","user1",2);
  FSAppAttempt memHeavyApp=scheduler.getSchedulerApp(memHeavyAttId);
  ApplicationAttemptId cpuHeavyAttId=createSchedulingRequest(1024,2,"queue1","user1",2);
  FSAppAttempt cpuHeavyApp=scheduler.getSchedulerApp(cpuHeavyAttId);
  // Switch queue1 to DRF before driving allocation rounds.
  DominantResourceFairnessPolicy drf=new DominantResourceFairnessPolicy();
  drf.initialize(scheduler.getClusterResource());
  scheduler.getQueueManager().getQueue("queue1").setPolicy(drf);
  scheduler.update();
  NodeUpdateSchedulerEvent heartbeat=new NodeUpdateSchedulerEvent(singleNode);
  // Round 1: first container goes to the memory-heavy app.
  scheduler.handle(heartbeat);
  Assert.assertEquals(1,memHeavyApp.getLiveContainers().size());
  Assert.assertEquals(0,cpuHeavyApp.getLiveContainers().size());
  // Round 2: the cpu-heavy app catches up.
  scheduler.handle(heartbeat);
  Assert.assertEquals(1,memHeavyApp.getLiveContainers().size());
  Assert.assertEquals(1,cpuHeavyApp.getLiveContainers().size());
  // Round 3: back to the memory-heavy app.
  scheduler.handle(heartbeat);
  Assert.assertEquals(2,memHeavyApp.getLiveContainers().size());
  Assert.assertEquals(1,cpuHeavyApp.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * After a container is preempted (warned, then killed once the
 * wait-time-before-kill elapses), the scheduler must restore the
 * container's original resource requests so the application can obtain
 * a replacement container.
 */
@Test(timeout=5000) public void testRecoverRequestAfterPreemption() throws Exception {
// Short kill delay so one 5-tick advance of the mock clock is enough.
conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,10);
MockClock clock=new MockClock();
scheduler.setClock(clock);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
Priority priority=Priority.newInstance(20);
String host="127.0.0.1";
int GB=1024;
// One 16GB/4-core node; its host matches the node-local request below.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * 1024,4),0,host);
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
scheduler.handle(nodeEvent);
// Ask for one container at all three locality levels (node/rack/any).
List ask=new ArrayList();
ResourceRequest nodeLocalRequest=createResourceRequest(GB,1,host,priority.getPriority(),1,true);
ResourceRequest rackLocalRequest=createResourceRequest(GB,1,node.getRackName(),priority.getPriority(),1,true);
ResourceRequest offRackRequest=createResourceRequest(GB,1,ResourceRequest.ANY,priority.getPriority(),1,true);
ask.add(nodeLocalRequest);
ask.add(rackLocalRequest);
ask.add(offRackRequest);
ApplicationAttemptId appAttemptId=createSchedulingRequest("queueA","user1",ask);
scheduler.update();
NodeUpdateSchedulerEvent nodeUpdate=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeUpdate);
// The single requested container is allocated, and the node-level
// request is consumed (no outstanding request for the host remains).
assertEquals(1,scheduler.getSchedulerApp(appAttemptId).getLiveContainers().size());
FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId);
Assert.assertNull(app.getResourceRequest(priority,host));
ContainerId containerId1=ContainerId.newInstance(appAttemptId,1);
RMContainer rmContainer=app.getRMContainer(containerId1);
// First call warns; after 5 ticks (past WAIT_TIME_BEFORE_KILL=10 —
// tick units per MockClock) the second call kills the container.
scheduler.warnOrKillContainer(rmContainer);
clock.tick(5);
scheduler.warnOrKillContainer(rmContainer);
// The preempted container's three locality-level requests must be
// recovered as outstanding requests of one container each.
List requests=rmContainer.getResourceRequests();
Assert.assertEquals(3,requests.size());
for ( ResourceRequest request : requests) {
Assert.assertEquals(1,app.getResourceRequest(priority,request.getResourceName()).getNumContainers());
}
scheduler.update();
scheduler.handle(nodeUpdate);
// The recovered request is satisfied with a fresh container.
List containers=scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,null).getContainers();
Assert.assertTrue(containers.size() == 1);
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Moving an application out of a queue whose max-apps limit of 0 made it
 * non-runnable should make it runnable in the destination queue and
 * update runnable-app counts up to the root.
 */
@Test public void testMoveMakesAppRunnable() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  QueueManager queueManager=scheduler.getQueueManager();
  FSLeafQueue sourceQueue=queueManager.getLeafQueue("queue1",true);
  FSLeafQueue destQueue=queueManager.getLeafQueue("queue2",true);
  // Forbid running apps in queue1 so the submission parks as non-runnable.
  scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1",0);
  ApplicationAttemptId attemptId=createSchedulingRequest(1024,1,"queue1","user1",3);
  FSAppAttempt attempt=scheduler.getSchedulerApp(attemptId);
  assertTrue(sourceQueue.getNonRunnableAppSchedulables().contains(attempt));
  scheduler.moveApplication(attemptId.getApplicationId(),"queue2");
  // Gone from the source, runnable (not merely parked) in the destination.
  assertFalse(sourceQueue.getNonRunnableAppSchedulables().contains(attempt));
  assertFalse(destQueue.getNonRunnableAppSchedulables().contains(attempt));
  assertTrue(destQueue.getRunnableAppSchedulables().contains(attempt));
  assertEquals(1,destQueue.getNumRunnableApps());
  assertEquals(1,queueManager.getRootQueue().getNumRunnableApps());
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * An app whose rack/ANY requests have relaxLocality=false gets nothing
 * on a non-requested node; once it cancels the strict node ask and
 * relaxes locality via allocate(), the same node satisfies the request.
 */
@Test public void testCancelStrictLocality() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  RMNode firstNode=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(firstNode));
  RMNode secondNode=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2");
  scheduler.handle(new NodeAddedSchedulerEvent(secondNode));
  ApplicationAttemptId attemptId=createSchedulingRequest(1024,"queue1","user1",0);
  // Node-level ask targets node1; rack and ANY levels forbid relaxation.
  ResourceRequest nodeReq=createResourceRequest(1024,firstNode.getHostName(),1,1,true);
  ResourceRequest rackReq=createResourceRequest(1024,"rack1",1,1,false);
  ResourceRequest anyReq=createResourceRequest(1024,ResourceRequest.ANY,1,1,false);
  createSchedulingRequestExistingApplication(nodeReq,attemptId);
  createSchedulingRequestExistingApplication(rackReq,attemptId);
  createSchedulingRequestExistingApplication(anyReq,attemptId);
  scheduler.update();
  NodeUpdateSchedulerEvent wrongNodeHeartbeat=new NodeUpdateSchedulerEvent(secondNode);
  FSAppAttempt attempt=scheduler.getSchedulerApp(attemptId);
  // Heartbeats from the non-requested node must never yield a container.
  for (int round=0; round < 10; round++) {
    scheduler.handle(wrongNodeHeartbeat);
    assertEquals(0,attempt.getLiveContainers().size());
  }
  // Cancel the node-level ask (0 containers) and allow relaxation at ANY.
  List update=Arrays.asList(createResourceRequest(1024,firstNode.getHostName(),1,0,true),createResourceRequest(1024,"rack1",1,0,true),createResourceRequest(1024,ResourceRequest.ANY,1,1,true));
  scheduler.allocate(attemptId,update,new ArrayList(),null,null);
  // Now the previously-wrong node can satisfy the relaxed request.
  scheduler.handle(wrongNodeHeartbeat);
  assertEquals(1,attempt.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Exercises the per-queue maxAMShare limit (0.2 of a 20GB node = 4096MB
 * of AM resource, with 2048MB already in use keeping headroom at one
 * more 1024MB AM): new AMs beyond the limit stay pending until enough
 * running AMs finish to free AM-resource headroom. Event order matters
 * throughout; each assertion pins the expected state after the
 * preceding update/handle calls.
 */
@Test public void testQueueMaxAMShare() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
// Allocation file limits queue1's AM share to 0.2. (NOTE(review): the
// XML payload of these println calls appears stripped in this source.)
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("0.2 ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Single 20GB/20-core node; 0.2 AM share of it is 4096MB.
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(20480,20),0,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.update();
FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true);
assertEquals("Queue queue1's fair share should be 0",0,queue1.getFairShare().getMemory());
createSchedulingRequest(1 * 1024,"root.default","user1");
scheduler.update();
scheduler.handle(updateEvent);
Resource amResource1=Resource.newInstance(1024,1);
Resource amResource2=Resource.newInstance(2048,2);
Resource amResource3=Resource.newInstance(1860,2);
int amPriority=RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
// App 1 (1024MB AM): fits under the AM share limit and runs.
ApplicationAttemptId attId1=createAppAttemptId(1,1);
createApplicationWithAMResource(attId1,"queue1","user1",amResource1);
createSchedulingRequestExistingApplication(1024,1,amPriority,attId1);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 1024 MB memory",1024,app1.getAMResource().getMemory());
assertEquals("Application1's AM should be running",1,app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 1024 MB memory",1024,queue1.getAmResourceUsage().getMemory());
// App 2 (1024MB AM): still fits; usage climbs to 2048MB.
ApplicationAttemptId attId2=createAppAttemptId(2,1);
createApplicationWithAMResource(attId2,"queue1","user1",amResource1);
createSchedulingRequestExistingApplication(1024,1,amPriority,attId2);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 1024 MB memory",1024,app2.getAMResource().getMemory());
assertEquals("Application2's AM should be running",1,app2.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App 3 (1024MB AM): would exceed the limit, so it stays pending.
ApplicationAttemptId attId3=createAppAttemptId(3,1);
createApplicationWithAMResource(attId3,"queue1","user1",amResource1);
createSchedulingRequestExistingApplication(1024,1,amPriority,attId3);
FSAppAttempt app3=scheduler.getSchedulerApp(attId3);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application3's AM requests 1024 MB memory",1024,app3.getAMResource().getMemory());
assertEquals("Application3's AM should not be running",0,app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// A non-AM container for app1 does NOT count against AM usage.
createSchedulingRequestExistingApplication(1024,1,attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1 should have two running containers",2,app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App1 finishing frees AM headroom, letting app3's AM start.
AppAttemptRemovedSchedulerEvent appRemovedEvent1=new AppAttemptRemovedSchedulerEvent(attId1,RMAppAttemptState.FINISHED,false);
scheduler.update();
scheduler.handle(appRemovedEvent1);
scheduler.handle(updateEvent);
assertEquals("Application1's AM should be finished",0,app1.getLiveContainers().size());
assertEquals("Application3's AM should be running",1,app3.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// Apps 4 and 5 (2048MB AMs each): both exceed remaining headroom.
ApplicationAttemptId attId4=createAppAttemptId(4,1);
createApplicationWithAMResource(attId4,"queue1","user1",amResource2);
createSchedulingRequestExistingApplication(2048,2,amPriority,attId4);
FSAppAttempt app4=scheduler.getSchedulerApp(attId4);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application4's AM requests 2048 MB memory",2048,app4.getAMResource().getMemory());
assertEquals("Application4's AM should not be running",0,app4.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
ApplicationAttemptId attId5=createAppAttemptId(5,1);
createApplicationWithAMResource(attId5,"queue1","user1",amResource2);
createSchedulingRequestExistingApplication(2048,2,amPriority,attId5);
FSAppAttempt app5=scheduler.getSchedulerApp(attId5);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application5's AM requests 2048 MB memory",2048,app5.getAMResource().getMemory());
assertEquals("Application5's AM should not be running",0,app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// Killing the still-pending app4 must not change AM usage or start app5.
AppAttemptRemovedSchedulerEvent appRemovedEvent4=new AppAttemptRemovedSchedulerEvent(attId4,RMAppAttemptState.KILLED,false);
scheduler.handle(appRemovedEvent4);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application5's AM should not be running",0,app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// Apps 2 and 3 finishing frees 2048MB, enough for app5's AM.
AppAttemptRemovedSchedulerEvent appRemovedEvent2=new AppAttemptRemovedSchedulerEvent(attId2,RMAppAttemptState.FINISHED,false);
AppAttemptRemovedSchedulerEvent appRemovedEvent3=new AppAttemptRemovedSchedulerEvent(attId3,RMAppAttemptState.FINISHED,false);
scheduler.handle(appRemovedEvent2);
scheduler.handle(appRemovedEvent3);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM should be finished",0,app2.getLiveContainers().size());
assertEquals("Application3's AM should be finished",0,app3.getLiveContainers().size());
assertEquals("Application5's AM should be running",1,app5.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// App 6 asks 1860MB — normalized up to 2048MB, which exceeds headroom.
ApplicationAttemptId attId6=createAppAttemptId(6,1);
createApplicationWithAMResource(attId6,"queue1","user1",amResource3);
createSchedulingRequestExistingApplication(1860,2,amPriority,attId6);
FSAppAttempt app6=scheduler.getSchedulerApp(attId6);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application6's AM should not be running",0,app6.getLiveContainers().size());
assertEquals("Application6's AM requests 2048 MB memory",2048,app6.getAMResource().getMemory());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
// All remaining apps finish; AM usage drops back to zero.
AppAttemptRemovedSchedulerEvent appRemovedEvent5=new AppAttemptRemovedSchedulerEvent(attId5,RMAppAttemptState.FINISHED,false);
AppAttemptRemovedSchedulerEvent appRemovedEvent6=new AppAttemptRemovedSchedulerEvent(attId6,RMAppAttemptState.FINISHED,false);
scheduler.handle(appRemovedEvent5);
scheduler.handle(appRemovedEvent6);
scheduler.update();
assertEquals("Queue1's AM resource usage should be 0",0,queue1.getAmResourceUsage().getMemory());
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Test to verify the behavior of {@link FSQueue#assignContainer(FSSchedulerNode)}.
 * Create two queues under root (fifoQueue and fairParent), and two queues
 * under fairParent (fairChild1 and fairChild2). Submit two apps to the
 * fifoQueue and one each to the fairChild* queues, all apps requiring 4
 * containers each of the total 16 container capacity.
 * Assert the number of containers for each app after 4, 8, 12 and 16 updates.
 * @throws Exception
 */
@Test(timeout=5000) public void testAssignContainer() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
final String user="user1";
final String fifoQueue="fifo";
final String fairParent="fairParent";
final String fairChild1=fairParent + ".fairChild1";
final String fairChild2=fairParent + ".fairChild2";
// Two 8GB/8-core nodes: 16 x 1024MB containers of total capacity.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),1,"127.0.0.1");
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent1);
scheduler.handle(nodeEvent2);
// Apps 1 and 4 share the FIFO queue; apps 2 and 3 get a fair child each.
ApplicationAttemptId attId1=createSchedulingRequest(1024,fifoQueue,user,4);
ApplicationAttemptId attId2=createSchedulingRequest(1024,fairChild1,user,4);
ApplicationAttemptId attId3=createSchedulingRequest(1024,fairChild2,user,4);
ApplicationAttemptId attId4=createSchedulingRequest(1024,fifoQueue,user,4);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
FSAppAttempt app3=scheduler.getSchedulerApp(attId3);
FSAppAttempt app4=scheduler.getSchedulerApp(attId4);
scheduler.getQueueManager().getLeafQueue(fifoQueue,true).setPolicy(SchedulingPolicy.parse("fifo"));
scheduler.update();
NodeUpdateSchedulerEvent updateEvent1=new NodeUpdateSchedulerEvent(node1);
NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2);
for (int i=0; i < 8; i++) {
scheduler.handle(updateEvent1);
scheduler.handle(updateEvent2);
// Check assignments after every pair of heartbeats (4, 8, 12, 16 updates).
if ((i + 1) % 2 == 0) {
String ERR="Wrong number of assigned containers after " + (i + 1) + " updates";
if (i < 4) {
// FIFO: app1 is fully served before app4 gets anything.
assertEquals(ERR,(i + 1),app1.getLiveContainers().size());
assertEquals(ERR,0,app4.getLiveContainers().size());
}
 else {
assertEquals(ERR,4,app1.getLiveContainers().size());
assertEquals(ERR,(i - 3),app4.getLiveContainers().size());
}
// Fair children: apps 2 and 3 progress evenly.
assertEquals(ERR,(i + 1) / 2,app2.getLiveContainers().size());
assertEquals(ERR,(i + 1) / 2,app3.getLiveContainers().size());
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * findLowestCommonAncestorQueue must walk past the immediate parents:
 * for leaves root.queue1.a.a1 and root.queue1.b.b1 the LCA is root.queue1.
 */
@Test public void testLowestCommonAncestorDeeperHierarchy() throws Exception {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  // Build a mocked two-level subtree under root.queue1.
  FSQueue branchA=mock(FSLeafQueue.class);
  FSQueue branchB=mock(FSLeafQueue.class);
  FSQueue leafA1=mock(FSLeafQueue.class);
  FSQueue leafB1=mock(FSLeafQueue.class);
  when(leafA1.getName()).thenReturn("root.queue1.a.a1");
  when(leafB1.getName()).thenReturn("root.queue1.b.b1");
  when(branchA.getChildQueues()).thenReturn(Arrays.asList(leafA1));
  when(branchB.getChildQueues()).thenReturn(Arrays.asList(leafB1));
  QueueManager queueManager=scheduler.getQueueManager();
  FSParentQueue commonParent=queueManager.getParentQueue("queue1",true);
  commonParent.addChildQueue(branchA);
  commonParent.addChildQueue(branchB);
  FSQueue lca=scheduler.findLowestCommonAncestorQueue(leafA1,leafB1);
  assertEquals(lca,commonParent);
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * With relaxLocality=false on the rack and ANY requests, containers may
 * only be placed on the specifically requested node — heartbeats from
 * any other node yield neither an allocation nor a reservation.
 */
@Test public void testStrictLocality() throws IOException {
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  RMNode wantedNode=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(wantedNode));
  RMNode otherNode=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2");
  scheduler.handle(new NodeAddedSchedulerEvent(otherNode));
  ApplicationAttemptId attemptId=createSchedulingRequest(1024,"queue1","user1",0);
  // Node-level ask targets node1 only; rack/ANY levels forbid relaxation.
  ResourceRequest nodeReq=createResourceRequest(1024,wantedNode.getHostName(),1,1,true);
  ResourceRequest rackReq=createResourceRequest(1024,wantedNode.getRackName(),1,1,false);
  ResourceRequest anyReq=createResourceRequest(1024,ResourceRequest.ANY,1,1,false);
  createSchedulingRequestExistingApplication(nodeReq,attemptId);
  createSchedulingRequestExistingApplication(rackReq,attemptId);
  createSchedulingRequestExistingApplication(anyReq,attemptId);
  scheduler.update();
  NodeUpdateSchedulerEvent wantedNodeHeartbeat=new NodeUpdateSchedulerEvent(wantedNode);
  NodeUpdateSchedulerEvent otherNodeHeartbeat=new NodeUpdateSchedulerEvent(otherNode);
  FSAppAttempt attempt=scheduler.getSchedulerApp(attemptId);
  // The non-requested node never allocates or reserves for this ask.
  for (int round=0; round < 10; round++) {
    scheduler.handle(otherNodeHeartbeat);
    assertEquals(0,attempt.getLiveContainers().size());
    assertEquals(0,attempt.getReservedContainers().size());
  }
  // The requested node satisfies the ask immediately.
  scheduler.handle(wantedNodeHeartbeat);
  assertEquals(1,attempt.getLiveContainers().size());
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * Fair shares must honor per-queue minimum allocations from the
 * allocation file: queueA settles at 1024MB and queueB at 2048MB of the
 * 3GB node.
 */
@Test public void testFairShareWithMinAlloc() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
  // Write the allocation file with the two minimums. (NOTE(review): the
  // XML payload of these println calls appears stripped in this source.)
  PrintWriter writer=new PrintWriter(new FileWriter(ALLOC_FILE));
  writer.println("");
  writer.println("");
  writer.println("");
  writer.println("1024mb,0vcores ");
  writer.println(" ");
  writer.println("");
  writer.println("2048mb,0vcores ");
  writer.println(" ");
  writer.println(" ");
  writer.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(3 * 1024),1,"127.0.0.1");
  scheduler.handle(new NodeAddedSchedulerEvent(node));
  createSchedulingRequest(2 * 1024,"queueA","user1");
  createSchedulingRequest(2 * 1024,"queueB","user1");
  scheduler.update();
  Collection queues=scheduler.getQueueManager().getLeafQueues();
  assertEquals(3,queues.size());
  for ( FSLeafQueue leaf : queues) {
    if (leaf.getName().equals("root.queueA")) {
      assertEquals(1024,leaf.getFairShare().getMemory());
    }
 else     if (leaf.getName().equals("root.queueB")) {
      assertEquals(2048,leaf.getFairShare().getMemory());
    }
  }
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * With a nested user-queue placement rule, each per-user queue created
 * under root.parentq must end up with a quarter of the 16GB cluster as
 * both its fair share and steady fair share, and a memory weight of 1.0.
 *
 * Fix: removed the two MockRMApp locals (rmApp1/rmApp2) that were
 * constructed but never used — dead code with no visible side effects.
 */
@Test public void testFairShareAndWeightsInNestedUserQueueRule() throws Exception {
  conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
  // Write the allocation file. (NOTE(review): the XML payload of these
  // println calls appears stripped in this source; strings preserved.)
  PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
  out.println("");
  out.println("");
  out.println("");
  out.println("1024mb,0vcores ");
  out.println(" ");
  out.println("");
  out.println("");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.println(" ");
  out.close();
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf,resourceManager.getRMContext());
  int capacity=16 * 1024;
  RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(capacity),1,"127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  // Two users land in per-user queues under parentq; a third in default.
  createSchedulingRequest(10 * 1024,"root.parentq","user1");
  createSchedulingRequest(10 * 1024,"root.parentq","user2");
  createSchedulingRequest(10 * 1024,"root.default","user3");
  scheduler.update();
  scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource());
  scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
  Collection leafQueues=scheduler.getQueueManager().getLeafQueues();
  for ( FSLeafQueue leaf : leafQueues) {
    if (leaf.getName().equals("root.parentq.user1") || leaf.getName().equals("root.parentq.user2")) {
      // Each user queue gets a quarter of the cluster, weight 1.0.
      assertEquals(capacity / 4,leaf.getFairShare().getMemory());
      assertEquals(capacity / 4,leaf.getSteadyFairShare().getMemory());
      assertEquals(1.0,leaf.getWeights().getWeight(ResourceType.MEMORY),0);
    }
  }
}
InternalCallVerifier EqualityVerifier
/**
 * Preemption must act within the same round once a starved queue's
 * preemption timeout expires: after queueA.queueA2 has waited past the
 * 10(s) min-share timeout, resToPreempt reports 3277MB (queueA2's
 * missing fair share per the assertion below) and preemptResources
 * marks 3 of app1's containers for preemption immediately.
 */
@Test public void testPreemptionIsNotDelayedToNextRound() throws Exception {
conf.setLong(FairSchedulerConfiguration.PREEMPTION_INTERVAL,5000);
conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,10000);
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"false");
MockClock clock=new MockClock();
scheduler.setClock(clock);
// Allocation file: queueA (weight 8) with children, queueB (weight 2),
// and a 10(s) default min-share preemption timeout. (NOTE(review): the
// XML payload of these print calls appears stripped in this source;
// note the out.print — not println — on the timeout line.)
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("8 ");
out.println(" ");
out.println(" ");
out.println(" ");
out.println("");
out.println("2 ");
out.println(" ");
out.print("10 ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// One 8GB/8-core node.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
// Fill the node: 7 containers for app1 (queueA1) + 1 for app2 (queueB).
ApplicationAttemptId app1=createSchedulingRequest(1 * 1024,1,"queueA.queueA1","user1",7,1);
ApplicationAttemptId app2=createSchedulingRequest(1 * 1024,1,"queueB","user2",1,1);
scheduler.update();
NodeUpdateSchedulerEvent nodeUpdate1=new NodeUpdateSchedulerEvent(node1);
for (int i=0; i < 8; i++) {
scheduler.handle(nodeUpdate1);
}
assertEquals(7,scheduler.getSchedulerApp(app1).getLiveContainers().size());
assertEquals(1,scheduler.getSchedulerApp(app2).getLiveContainers().size());
// app3 arrives in the sibling queueA2 and is starved: the node is full.
ApplicationAttemptId app3=createSchedulingRequest(1 * 1024,1,"queueA.queueA2","user3",7,1);
scheduler.update();
// Advance past the 10-unit min-share preemption timeout.
clock.tick(11);
scheduler.update();
Resource toPreempt=scheduler.resToPreempt(scheduler.getQueueManager().getLeafQueue("queueA.queueA2",false),clock.getTime());
assertEquals(3277,toPreempt.getMemory());
// Preemption happens in this round — not deferred to the next one.
scheduler.preemptResources(toPreempt);
assertEquals(3,scheduler.getSchedulerApp(app1).getPreemptionContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Dominant Resource Fairness applied hierarchically: four apps spread over
 * root.queue1.subqueue1/subqueue2 and root.queue2 on one 12GB/12-vcore node.
 * Each heartbeat should hand the next container to the app that DRF picks at
 * every level of the hierarchy; the per-heartbeat asserts pin that order.
 */
@Test public void testDRFHierarchicalQueues() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node=MockNodes.newNodeInfo(1,BuilderUtils.newResource(12288,12),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
scheduler.handle(nodeEvent);
// The short sleeps give each app a distinct start time, which breaks
// DRF ties deterministically (earlier app wins).
ApplicationAttemptId appAttId1=createSchedulingRequest(3074,1,"queue1.subqueue1","user1",2);
Thread.sleep(3);
FSAppAttempt app1=scheduler.getSchedulerApp(appAttId1);
ApplicationAttemptId appAttId2=createSchedulingRequest(1024,3,"queue1.subqueue1","user1",2);
Thread.sleep(3);
FSAppAttempt app2=scheduler.getSchedulerApp(appAttId2);
ApplicationAttemptId appAttId3=createSchedulingRequest(2048,2,"queue1.subqueue2","user1",2);
Thread.sleep(3);
FSAppAttempt app3=scheduler.getSchedulerApp(appAttId3);
ApplicationAttemptId appAttId4=createSchedulingRequest(1024,2,"queue2","user1",2);
Thread.sleep(3);
FSAppAttempt app4=scheduler.getSchedulerApp(appAttId4);
// Install DRF on root and the queue1 subtree.
DominantResourceFairnessPolicy drfPolicy=new DominantResourceFairnessPolicy();
drfPolicy.initialize(scheduler.getClusterResource());
scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
scheduler.getQueueManager().getQueue("queue1.subqueue1").setPolicy(drfPolicy);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
// One container is assigned per heartbeat; the order below is the DRF
// decision sequence being verified.
scheduler.handle(updateEvent);
Assert.assertEquals(1,app1.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app4.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(2,app4.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app3.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(2,app3.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app2.getLiveContainers().size());
scheduler.handle(updateEvent);
// Final tallies across all four apps.
Assert.assertEquals(1,app1.getLiveContainers().size());
Assert.assertEquals(1,app2.getLiveContainers().size());
Assert.assertEquals(2,app3.getLiveContainers().size());
Assert.assertEquals(2,app4.getLiveContainers().size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Continuous scheduling: with CONTINUOUS_SCHEDULING_ENABLED the scheduler
 * allocates from its own thread instead of on node heartbeats. One request is
 * submitted and awaited, then the ask is raised to two containers and the
 * second allocation must land on the other node.
 *
 * Fixes: the two wait loops were empty busy-spins that pinned a CPU core for
 * the whole wait — each now sleeps briefly between polls (the @Test timeout
 * still bounds them). Also swapped assertEquals arguments into the JUnit
 * (expected, actual) order so failure messages read correctly.
 */
@Test(timeout=10000) public void testContinuousScheduling() throws Exception {
FairScheduler fs=new FairScheduler();
Configuration conf=createConfiguration();
conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,true);
fs.setRMContext(resourceManager.getRMContext());
fs.init(conf);
fs.start();
fs.reinitialize(conf,resourceManager.getRMContext());
Assert.assertTrue("Continuous scheduling should be enabled.",fs.isContinuousSchedulingEnabled());
// Two identical 8GB/8-vcore nodes.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),1,"127.0.0.1");
fs.handle(new NodeAddedSchedulerEvent(node1));
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),2,"127.0.0.2");
fs.handle(new NodeAddedSchedulerEvent(node2));
Assert.assertEquals(16 * 1024,fs.getClusterResource().getMemory());
Assert.assertEquals(16,fs.getClusterResource().getVirtualCores());
// Register an app and ask for one 1GB/1-vcore container anywhere.
ApplicationAttemptId appAttemptId=createAppAttemptId(this.APP_ID++,this.ATTEMPT_ID++);
fs.addApplication(appAttemptId.getApplicationId(),"queue11","user11",false);
fs.addApplicationAttempt(appAttemptId,false,false);
List ask=new ArrayList();
ask.add(createResourceRequest(1024,1,ResourceRequest.ANY,1,1,true));
fs.allocate(appAttemptId,ask,new ArrayList(),null,null);
// Give the continuous-scheduling thread at least one pass.
Thread.sleep(fs.getConf().getContinuousSchedulingSleepMs() + 500);
FSAppAttempt app=fs.getSchedulerApp(appAttemptId);
// Poll (sleeping, not busy-waiting) until the first container arrives.
while (app.getCurrentConsumption().equals(Resources.none())) {
Thread.sleep(10);
}
Assert.assertEquals(1024,app.getCurrentConsumption().getMemory());
Assert.assertEquals(1,app.getCurrentConsumption().getVirtualCores());
// Raise the ask to two containers and await the second allocation.
ask.clear();
ask.add(createResourceRequest(1024,1,ResourceRequest.ANY,2,1,true));
fs.allocate(appAttemptId,ask,new ArrayList(),null,null);
while (app.getCurrentConsumption().equals(Resources.createResource(1024,1))) {
Thread.sleep(10);
}
Assert.assertEquals(2048,app.getCurrentConsumption().getMemory());
Assert.assertEquals(2,app.getCurrentConsumption().getVirtualCores());
// Continuous scheduling should have spread the containers over both nodes.
Set nodes=new HashSet();
Iterator it=app.getLiveContainers().iterator();
while (it.hasNext()) {
nodes.add(it.next().getContainer().getNodeId());
}
Assert.assertEquals(2,nodes.size());
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * isStarvedForFairShare should flag a queue whose usage is below its fair
 * share (queueB here) and clear once the queue receives a container.
 * NOTE(review): the println strings look like an allocation XML file with its
 * tags stripped during extraction; .25/.75 are presumably the two queues'
 * fair-share preemption thresholds — confirm against the original resource.
 */
@Test(timeout=5000) public void testIsStarvedForFairShare() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println(".25 ");
out.println(" ");
out.println("");
out.println(".75 ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// One 4GB/4-vcore node.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(4 * 1024,4),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
// queueA grabs 3GB first, leaving queueB under-served.
createSchedulingRequest(3 * 1024,"queueA","user1");
scheduler.update();
NodeUpdateSchedulerEvent nodeEvent2=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(nodeEvent2);
createSchedulingRequest(1 * 1024,"queueB","user1");
scheduler.update();
Collection queues=scheduler.getQueueManager().getLeafQueues();
// queueA, queueB, and the default queue.
assertEquals(3,queues.size());
for ( FSLeafQueue p : queues) {
if (p.getName().equals("root.queueA")) {
// queueA holds most of the node: not starved.
assertEquals(false,scheduler.isStarvedForFairShare(p));
}
else if (p.getName().equals("root.queueB")) {
// queueB has nothing yet: starved for its fair share.
assertEquals(true,scheduler.isStarvedForFairShare(p));
}
}
// One more heartbeat satisfies queueB's 1GB ask, clearing starvation.
scheduler.handle(nodeEvent2);
for ( FSLeafQueue p : queues) {
if (p.getName().equals("root.queueB")) {
assertEquals(false,scheduler.isStarvedForFairShare(p));
}
}
}
InternalCallVerifier EqualityVerifier
/**
 * Steady fair shares must track three events: node addition (shares become
 * nonzero), allocation-file reload with changed weights and a new queue
 * (shares rebalance), and node removal (shares drop back to zero).
 * NOTE(review): the println strings are an allocation XML file whose tags
 * were stripped during extraction; the '1'/'2' values are presumably queue
 * weights — confirm against the original test resource.
 */
@Test public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
// Initial config: child1 and child2, equal weight.
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("fair ");
out.println("");
out.println(" drf ");
out.println(" ");
out.println(" 1 ");
out.println(" ");
out.println(" ");
out.println(" 1 ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
// No nodes yet: steady shares are zero.
assertEquals(0,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory());
assertEquals(0,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory());
// Add a 6GB node: the two equal-weight children split it 2048/2048
// (the default queue takes the remaining third).
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(6144),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
assertEquals(6144,scheduler.getClusterResource().getMemory());
assertEquals(2048,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory());
assertEquals(2048,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory());
// Reload with child2/child3 at double child1's weight.
out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("fair ");
out.println("");
out.println(" drf ");
out.println(" ");
out.println(" 1 ");
out.println(" ");
out.println(" ");
out.println(" 2 ");
out.println(" ");
out.println(" ");
out.println(" 2 ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Shares rebalance 1:2:2 plus default-queue slack.
assertEquals(1024,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory());
assertEquals(2048,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory());
assertEquals(2048,queueManager.getLeafQueue("child3",false).getSteadyFairShare().getMemory());
// Remove the node: cluster and steady shares return to zero.
NodeRemovedSchedulerEvent nodeEvent2=new NodeRemovedSchedulerEvent(node1);
scheduler.handle(nodeEvent2);
assertEquals(0,scheduler.getClusterResource().getMemory());
assertEquals(0,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory());
assertEquals(0,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A node with a single vcore can host only one 1-vcore container: after the
 * first allocation, another heartbeat must not produce a second container
 * even though the app still has an outstanding request.
 */
@Test public void testNoMoreCpuOnNode() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Plenty of memory (2GB) but only one vcore on the node.
RMNode rmNode=MockNodes.newNodeInfo(1,Resources.createResource(2048,1),1,"127.0.0.1");
scheduler.handle(new NodeAddedSchedulerEvent(rmNode));
// The app asks for two 1GB/1-vcore containers.
ApplicationAttemptId attemptId=createSchedulingRequest(1024,1,"default","user1",2);
FSAppAttempt attempt=scheduler.getSchedulerApp(attemptId);
scheduler.update();
NodeUpdateSchedulerEvent heartbeat=new NodeUpdateSchedulerEvent(rmNode);
// First heartbeat: the lone vcore is consumed.
scheduler.handle(heartbeat);
assertEquals(1,attempt.getLiveContainers().size());
// Second heartbeat: CPU exhausted, so nothing more is allocated.
scheduler.handle(heartbeat);
assertEquals(1,attempt.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * findLowestCommonAncestorQueue of two direct children of root must return
 * the root queue itself.
 *
 * Fix: the assertEquals arguments were reversed — JUnit's contract is
 * (expected, actual), so a failure previously reported the values backwards.
 */
@Test public void testLowestCommonAncestorRootParent() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Two mocked leaves attached directly under root.
FSLeafQueue aQueue=mock(FSLeafQueue.class);
FSLeafQueue bQueue=mock(FSLeafQueue.class);
when(aQueue.getName()).thenReturn("root.a");
when(bQueue.getName()).thenReturn("root.b");
QueueManager queueManager=scheduler.getQueueManager();
FSParentQueue queue1=queueManager.getParentQueue("root",false);
queue1.addChildQueue(aQueue);
queue1.addChildQueue(bQueue);
FSQueue ancestorQueue=scheduler.findLowestCommonAncestorQueue(aQueue,bQueue);
// Expected value first: the common ancestor is root.
assertEquals(queue1,ancestorQueue);
}
InternalCallVerifier EqualityVerifier
/**
 * Two containers (512MB/2-vcore each) across two nodes: verifies queue usage,
 * queue metrics, and root metrics all reflect the allocations, and that
 * available resources equal cluster capacity minus usage.
 */
@Test(timeout=5000) public void testSimpleContainerAllocation() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// node1: 1GB/4 vcores; node2: 512MB/2 vcores.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024,4),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(512,2),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
// Two 512MB/2-vcore containers requested for queue1.
createSchedulingRequest(512,2,"queue1","user1",2);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
// The 512MB ask is rounded up to the scheduler's MB increment.
assertEquals(FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemory());
NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2);
scheduler.handle(updateEvent2);
// Both containers placed: 1024MB / 2 vcores in use for queue1.
assertEquals(1024,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemory());
assertEquals(2,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getVirtualCores());
// Queue-level and root-level metrics must agree with usage.
QueueMetrics queue1Metrics=scheduler.getQueueManager().getQueue("queue1").getMetrics();
assertEquals(1024,queue1Metrics.getAllocatedMB());
assertEquals(2,queue1Metrics.getAllocatedVirtualCores());
assertEquals(1024,scheduler.getRootQueueMetrics().getAllocatedMB());
assertEquals(2,scheduler.getRootQueueMetrics().getAllocatedVirtualCores());
// Remaining capacity: 1536MB - 1024MB and 6 - 2 vcores.
assertEquals(512,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Submitting as a user not present in the queue ACL must cause the RM app to
 * end with FinalApplicationStatus.FAILED. Drives a real RMAppImpl through its
 * state machine and polls (bounded) for the async transitions.
 * NOTE(review): the println strings are an allocation XML file with its tags
 * stripped during extraction; 'userallow' is presumably the ACL entry —
 * confirm against the original test resource.
 */
@SuppressWarnings("unchecked") @Test public void testNotAllowSubmitApplication() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println(" ");
out.println(" ");
out.println(" ");
out.println(" userallow ");
out.println(" userallow ");
out.println(" ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Submit as a user that is NOT in the queue's ACL.
int appId=this.APP_ID++;
String user="usernotallow";
String queue="queue1";
ApplicationId applicationId=MockApps.newAppID(appId);
String name=MockApps.newAppName();
ApplicationMasterService masterService=new ApplicationMasterService(resourceManager.getRMContext(),scheduler);
ApplicationSubmissionContext submissionContext=new ApplicationSubmissionContextPBImpl();
ContainerLaunchContext clc=BuilderUtils.newContainerLaunchContext(null,null,null,null,null,null);
submissionContext.setApplicationId(applicationId);
submissionContext.setAMContainerSpec(clc);
RMApp application=new RMAppImpl(applicationId,resourceManager.getRMContext(),conf,name,user,queue,submissionContext,scheduler,masterService,System.currentTimeMillis(),"YARN",null);
resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId,application);
application.handle(new RMAppEvent(applicationId,RMAppEventType.START));
// Poll (max 20 * 100ms) for the async transition to SUBMITTED.
final int MAX_TRIES=20;
int numTries=0;
while (!application.getState().equals(RMAppState.SUBMITTED) && numTries < MAX_TRIES) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ex) {
ex.printStackTrace();
}
numTries++;
}
assertEquals("The application doesn't reach SUBMITTED.",RMAppState.SUBMITTED,application.getState());
// Adding the attempt triggers the ACL check, which should reject the app.
ApplicationAttemptId attId=ApplicationAttemptId.newInstance(applicationId,this.ATTEMPT_ID++);
scheduler.addApplication(attId.getApplicationId(),queue,user,false);
// Poll until the app records a finish time (i.e. it has terminated).
numTries=0;
while (application.getFinishTime() == 0 && numTries < MAX_TRIES) {
try {
Thread.sleep(100);
}
catch ( InterruptedException ex) {
ex.printStackTrace();
}
numTries++;
}
assertEquals(FinalApplicationStatus.FAILED,application.getFinalApplicationStatus());
}
InternalCallVerifier EqualityVerifier
/**
 * With ALLOW_UNDECLARED_POOLS=false, a submission to a queue not declared in
 * the allocation file must be redirected to the default queue instead of
 * creating the queue on the fly.
 * NOTE(review): the println strings are an allocation XML file whose tags
 * were stripped during extraction — presumably it declares only 'jerry'.
 */
@Test public void testDontAllowUndeclaredPools() throws Exception {
conf.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS,false);
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
FSLeafQueue jerryQueue=queueManager.getLeafQueue("jerry",false);
FSLeafQueue defaultQueue=queueManager.getLeafQueue("default",false);
// Declared queue: the app lands in 'jerry'.
createSchedulingRequest(1024,"jerry","someuser");
assertEquals(1,jerryQueue.getRunnableAppSchedulables().size());
// Undeclared queue: the app is diverted to 'default'.
createSchedulingRequest(1024,"newqueue","someuser");
assertEquals(1,jerryQueue.getRunnableAppSchedulables().size());
assertEquals(1,defaultQueue.getRunnableAppSchedulables().size());
// Explicit 'default' submissions also accumulate there.
createSchedulingRequest(1024,"default","someuser");
assertEquals(1,jerryQueue.getRunnableAppSchedulables().size());
assertEquals(2,defaultQueue.getRunnableAppSchedulables().size());
// User 'jerry' submitting to 'default' ends up in queue 'jerry'
// (user-as-default-queue placement still applies to declared queues).
createSchedulingRequest(1024,"default","jerry");
assertEquals(2,jerryQueue.getRunnableAppSchedulables().size());
assertEquals(2,defaultQueue.getRunnableAppSchedulables().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* If we update our ask to strictly request a node, it doesn't make sense to keep
* a reservation on another.
*/
/**
 * An app with a strict-locality ask for node2 reserves on node1 while waiting;
 * when the ask is updated to relax/drop that request, the stale reservation
 * on node1 must be released.
 */
@Test public void testReservationsStrictLocality() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Only node1 is registered; node2 exists solely as a locality target.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1");
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent2);
ApplicationAttemptId attId=createSchedulingRequest(1024,"queue1","user1",0);
FSAppAttempt app=scheduler.getSchedulerApp(attId);
// Node-level ask targets node2; relaxLocality=false on ANY makes it strict.
ResourceRequest nodeRequest=createResourceRequest(1024,node2.getHostName(),1,2,true);
ResourceRequest rackRequest=createResourceRequest(1024,"rack1",1,2,true);
ResourceRequest anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,2,false);
createSchedulingRequestExistingApplication(nodeRequest,attId);
createSchedulingRequestExistingApplication(rackRequest,attId);
createSchedulingRequestExistingApplication(anyRequest,attId);
scheduler.update();
NodeUpdateSchedulerEvent nodeUpdateEvent=new NodeUpdateSchedulerEvent(node1);
// First heartbeat allocates one container on node1 (rack-local).
scheduler.handle(nodeUpdateEvent);
assertEquals(1,app.getLiveContainers().size());
// Second heartbeat: node1 is full, so the app reserves there.
scheduler.handle(nodeUpdateEvent);
assertEquals(1,app.getReservedContainers().size());
// Update the ask so only one container (already satisfied) remains wanted.
rackRequest=createResourceRequest(1024,"rack1",1,1,false);
anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,1,false);
scheduler.allocate(attId,Arrays.asList(rackRequest,anyRequest),new ArrayList(),null,null);
// The now-pointless reservation on node1 must be dropped.
scheduler.handle(nodeUpdateEvent);
assertEquals(0,app.getReservedContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
* Two apps on one queue, one app on another
*/
/**
 * Basic DRF across queues: queue1 holds two apps with different dominant
 * resources (app1 memory-heavy, app2 cpu-heavy) while queue2 holds app3.
 * The per-heartbeat asserts pin the DRF allocation order across queues.
 */
@Test public void testBasicDRFWithQueues() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// One 8GB/7-vcore node.
RMNode node=MockNodes.newNodeInfo(1,BuilderUtils.newResource(8192,7),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
scheduler.handle(nodeEvent);
ApplicationAttemptId appAttId1=createSchedulingRequest(3072,1,"queue1","user1",2);
FSAppAttempt app1=scheduler.getSchedulerApp(appAttId1);
ApplicationAttemptId appAttId2=createSchedulingRequest(2048,2,"queue1","user1",2);
FSAppAttempt app2=scheduler.getSchedulerApp(appAttId2);
ApplicationAttemptId appAttId3=createSchedulingRequest(1024,2,"queue2","user1",2);
FSAppAttempt app3=scheduler.getSchedulerApp(appAttId3);
// DRF on root and on queue1 (intra-queue ordering).
DominantResourceFairnessPolicy drfPolicy=new DominantResourceFairnessPolicy();
drfPolicy.initialize(scheduler.getClusterResource());
scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
// One container per heartbeat; the sequence below is the DRF decision order.
scheduler.handle(updateEvent);
Assert.assertEquals(1,app1.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app3.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(2,app3.getLiveContainers().size());
scheduler.handle(updateEvent);
Assert.assertEquals(1,app2.getLiveContainers().size());
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * resToPreempt growth over time: queues C and D are starved after A and B
 * fill the cluster. No preemption before the min-share timeout; the min-share
 * deficit (1024) after the first timeout; the larger fair-share deficit
 * (1536) after the second.
 * NOTE(review): the println strings are an allocation XML file with tags
 * stripped during extraction; .25 weights, 1024mb min shares, and the 5/10
 * values are presumably the min-share/fair-share preemption timeouts —
 * confirm against the original test resource.
 */
@Test(timeout=5000) public void testPreemptionDecision() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
MockClock clock=new MockClock();
scheduler.setClock(clock);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("0mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.println("");
out.println(".25 ");
out.println("1024mb,0vcores ");
out.println(" ");
out.print("5 ");
out.print("10 ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Three 2GB/2-vcore nodes => 6GB cluster.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),2,"127.0.0.2");
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
RMNode node3=MockNodes.newNodeInfo(1,Resources.createResource(2 * 1024,2),3,"127.0.0.3");
NodeAddedSchedulerEvent nodeEvent3=new NodeAddedSchedulerEvent(node3);
scheduler.handle(nodeEvent3);
// Queues A and B fill the whole cluster (six 1GB containers).
ApplicationAttemptId app1=createSchedulingRequest(1 * 1024,"queueA","user1",1,1);
ApplicationAttemptId app2=createSchedulingRequest(1 * 1024,"queueA","user1",1,2);
ApplicationAttemptId app3=createSchedulingRequest(1 * 1024,"queueA","user1",1,3);
ApplicationAttemptId app4=createSchedulingRequest(1 * 1024,"queueB","user1",1,1);
ApplicationAttemptId app5=createSchedulingRequest(1 * 1024,"queueB","user1",1,2);
ApplicationAttemptId app6=createSchedulingRequest(1 * 1024,"queueB","user1",1,3);
scheduler.update();
// Two heartbeat rounds per node place all six containers.
for (int i=0; i < 2; i++) {
NodeUpdateSchedulerEvent nodeUpdate1=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(nodeUpdate1);
NodeUpdateSchedulerEvent nodeUpdate2=new NodeUpdateSchedulerEvent(node2);
scheduler.handle(nodeUpdate2);
NodeUpdateSchedulerEvent nodeUpdate3=new NodeUpdateSchedulerEvent(node3);
scheduler.handle(nodeUpdate3);
}
// Queues C and D now want resources but the cluster is full: starved.
ApplicationAttemptId app7=createSchedulingRequest(1 * 1024,"queueC","user1",1,1);
ApplicationAttemptId app8=createSchedulingRequest(1 * 1024,"queueC","user1",1,2);
ApplicationAttemptId app9=createSchedulingRequest(1 * 1024,"queueC","user1",1,3);
ApplicationAttemptId app10=createSchedulingRequest(1 * 1024,"queueD","user1",1,1);
ApplicationAttemptId app11=createSchedulingRequest(1 * 1024,"queueD","user1",1,2);
ApplicationAttemptId app12=createSchedulingRequest(1 * 1024,"queueD","user1",1,3);
scheduler.update();
FSLeafQueue schedC=scheduler.getQueueManager().getLeafQueue("queueC",true);
FSLeafQueue schedD=scheduler.getQueueManager().getLeafQueue("queueD",true);
// Before any timeout elapses, nothing is eligible for preemption.
assertTrue(Resources.equals(Resources.none(),scheduler.resToPreempt(schedC,clock.getTime())));
assertTrue(Resources.equals(Resources.none(),scheduler.resToPreempt(schedD,clock.getTime())));
// Past the first (min-share) timeout: preempt up to the 1024MB min share.
clock.tick(6);
assertEquals(1024,scheduler.resToPreempt(schedC,clock.getTime()).getMemory());
assertEquals(1024,scheduler.resToPreempt(schedD,clock.getTime()).getMemory());
scheduler.update();
// Past the second (fair-share) timeout: the larger fair-share deficit wins.
clock.tick(6);
assertEquals(1536,scheduler.resToPreempt(schedC,clock.getTime()).getMemory());
assertEquals(1536,scheduler.resToPreempt(schedD,clock.getTime()).getMemory());
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * isStarvedForMinShare should flag a queue whose usage is below its
 * configured 2048MB min share (queueB) and clear once it gets a container.
 * NOTE(review): the println strings are an allocation XML file whose tags
 * were stripped during extraction — confirm against the original resource.
 */
@Test(timeout=5000) public void testIsStarvedForMinShare() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("");
out.println("");
out.println("");
out.println("2048mb,0vcores ");
out.println(" ");
out.println("");
out.println("2048mb,0vcores ");
out.println(" ");
out.println(" ");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// One 4GB/4-vcore node.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(4 * 1024,4),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
// queueA takes 3GB first, leaving queueB below its 2GB min share.
createSchedulingRequest(3 * 1024,"queueA","user1");
scheduler.update();
NodeUpdateSchedulerEvent nodeEvent2=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(nodeEvent2);
createSchedulingRequest(1 * 1024,"queueB","user1");
scheduler.update();
Collection queues=scheduler.getQueueManager().getLeafQueues();
// queueA, queueB, and the default queue.
assertEquals(3,queues.size());
for ( FSLeafQueue p : queues) {
if (p.getName().equals("root.queueA")) {
// queueA exceeds its min share: not starved.
assertEquals(false,scheduler.isStarvedForMinShare(p));
}
else if (p.getName().equals("root.queueB")) {
// queueB has nothing yet: starved for its min share.
assertEquals(true,scheduler.isStarvedForMinShare(p));
}
}
// One more heartbeat grants queueB's 1GB ask, clearing starvation.
scheduler.handle(nodeEvent2);
for ( FSLeafQueue p : queues) {
if (p.getName().equals("root.queueB")) {
assertEquals(false,scheduler.isStarvedForMinShare(p));
}
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With ASSIGN_MULTIPLE enabled, maxAssign caps how many containers a single
 * node heartbeat may allocate; -1 means unlimited.
 */
@Test(timeout=3000) public void testMaxAssign() throws Exception {
conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,true);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// A single node big enough (16GB/16 vcores) to hold every requested container.
RMNode bigNode=MockNodes.newNodeInfo(1,Resources.createResource(16384,16),0,"127.0.0.1");
NodeUpdateSchedulerEvent heartbeat=new NodeUpdateSchedulerEvent(bigNode);
scheduler.handle(new NodeAddedSchedulerEvent(bigNode));
// The app wants eight 1GB containers.
ApplicationAttemptId attemptId=createSchedulingRequest(1024,"root.default","user",8);
FSAppAttempt attempt=scheduler.getSchedulerApp(attemptId);
// Cap the per-heartbeat assignment at two.
scheduler.maxAssign=2;
scheduler.update();
scheduler.handle(heartbeat);
assertEquals("Incorrect number of containers allocated",2,attempt.getLiveContainers().size());
// Remove the cap: one heartbeat satisfies all remaining asks.
scheduler.maxAssign=-1;
scheduler.update();
scheduler.handle(heartbeat);
assertEquals("Incorrect number of containers allocated",8,attempt.getLiveContainers().size());
}
InternalCallVerifier EqualityVerifier
/**
 * Three hungry queues on a 10GB node: each queue's fair share and steady
 * fair share (and the matching metrics) must be one third of the cluster,
 * i.e. 10240 / 3 rounded to 3414MB.
 */
@Test public void testSimpleFairShareCalculation() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Single 10GB node.
RMNode rmNode=MockNodes.newNodeInfo(1,Resources.createResource(10 * 1024),1,"127.0.0.1");
scheduler.handle(new NodeAddedSchedulerEvent(rmNode));
// One oversized request per queue keeps every queue demanding.
createSchedulingRequest(10 * 1024,"queue1","user1");
createSchedulingRequest(10 * 1024,"queue2","user1");
createSchedulingRequest(10 * 1024,"root.default","user1");
scheduler.update();
// Seed and recompute the steady shares from the full cluster resource.
scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource());
scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
Collection leafQueues=scheduler.getQueueManager().getLeafQueues();
assertEquals(3,leafQueues.size());
for ( FSLeafQueue leaf : leafQueues) {
// Fair share, steady share, and both metric views agree on 1/3 each.
assertEquals(3414,leaf.getFairShare().getMemory());
assertEquals(3414,leaf.getMetrics().getFairShareMB());
assertEquals(3414,leaf.getSteadyFairShare().getMemory());
assertEquals(3414,leaf.getMetrics().getSteadyFairShareMB());
}
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier
/**
 * With RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, two nodes sharing a host but
 * differing in port are distinct placement targets: a host:port-specific,
 * non-relaxed ask must only ever be satisfied on the matching node.
 */
@Test(timeout=30000) public void testHostPortNodeName() throws Exception {
conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,true);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Same host (127.0.0.1), different ports 1 and 2.
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1",1);
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.1",2);
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",0);
// Node-level ask names node1 by "host:port"; rack/ANY asks do not relax.
ResourceRequest nodeRequest=createResourceRequest(1024,node1.getNodeID().getHost() + ":" + node1.getNodeID().getPort(),1,1,true);
ResourceRequest rackRequest=createResourceRequest(1024,node1.getRackName(),1,1,false);
ResourceRequest anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,1,false);
createSchedulingRequestExistingApplication(nodeRequest,attId1);
createSchedulingRequestExistingApplication(rackRequest,attId1);
createSchedulingRequestExistingApplication(anyRequest,attId1);
scheduler.update();
NodeUpdateSchedulerEvent node1UpdateEvent=new NodeUpdateSchedulerEvent(node1);
NodeUpdateSchedulerEvent node2UpdateEvent=new NodeUpdateSchedulerEvent(node2);
FSAppAttempt app=scheduler.getSchedulerApp(attId1);
// Repeated heartbeats from the wrong-port node must yield nothing —
// no allocation and no reservation.
for (int i=0; i < 10; i++) {
scheduler.handle(node2UpdateEvent);
assertEquals(0,app.getLiveContainers().size());
assertEquals(0,app.getReservedContainers().size());
}
// The matching node satisfies the ask immediately.
scheduler.handle(node1UpdateEvent);
assertEquals(1,app.getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Blacklist handling through allocate(): additions/removals toggle the app's
 * blacklist, a blacklisted host receives no containers, and allocation
 * resumes once the host is removed from the blacklist.
 */
@SuppressWarnings("resource") @Test public void testBlacklistNodes() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
final int GB=1024;
String host="127.0.0.1";
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * GB,16),0,host);
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
ApplicationAttemptId appAttemptId=createSchedulingRequest(GB,"root.default","user",1);
FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId);
// allocate() with a blacklist-additions list marks the host.
scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
assertTrue(app.isBlacklisted(host));
// A blacklist-removals list clears it again.
scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
assertFalse(scheduler.getSchedulerApp(appAttemptId).isBlacklisted(host));
// Re-blacklist while also updating the ask (0 containers requested here).
List update=Arrays.asList(createResourceRequest(GB,node.getHostName(),1,0,true));
scheduler.allocate(appAttemptId,update,Collections.emptyList(),Collections.singletonList(host),null);
assertTrue(app.isBlacklisted(host));
scheduler.update();
scheduler.handle(updateEvent);
// Blacklisted host: the heartbeat must not place anything on it.
assertEquals("Incorrect number of containers allocated",0,app.getLiveContainers().size());
// Remove from the blacklist, request again: the allocation goes through.
scheduler.allocate(appAttemptId,update,Collections.emptyList(),null,Collections.singletonList(host));
assertFalse(app.isBlacklisted(host));
createSchedulingRequest(GB,"root.default","user",1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",1,app.getLiveContainers().size());
}
InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier
/**
 * Moving an app whose current usage (2048MB/2 vcores) exceeds the target
 * queue's maxResources (1024MB/1 vcore) must be rejected — the final
 * moveApplication call is expected to throw YarnException.
 */
@Test(expected=YarnException.class) public void testMoveWouldViolateMaxResourcesConstraints() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueMgr=scheduler.getQueueManager();
FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true);
queueMgr.getLeafQueue("queue2",true);
// Cap queue2 below what the app already consumes.
scheduler.getAllocationConfiguration().maxQueueResources.put("root.queue2",Resource.newInstance(1024,1));
ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3);
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(2048,2));
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
// Two heartbeats allocate two 1GB/1-vcore containers, filling the node.
scheduler.handle(updateEvent);
scheduler.handle(updateEvent);
assertEquals(Resource.newInstance(2048,2),oldQueue.getResourceUsage());
// Usage exceeds queue2's cap: this must throw YarnException.
scheduler.moveApplication(appAttId.getApplicationId(),"queue2");
}
InternalCallVerifier EqualityVerifier
/**
 * Moving a non-runnable application (both source and target queues have
 * maxApps=0) must leave every runnable-app count at zero — the move must not
 * accidentally promote the app to runnable.
 */
@Test public void testMoveNonRunnableApp() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
FSLeafQueue sourceQueue=queueManager.getLeafQueue("queue1",true);
FSLeafQueue destQueue=queueManager.getLeafQueue("queue2",true);
// Forbid running apps in both queues so the submission stays non-runnable.
scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1",0);
scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue2",0);
ApplicationAttemptId attemptId=createSchedulingRequest(1024,1,"queue1","user1",3);
assertEquals(0,sourceQueue.getNumRunnableApps());
scheduler.moveApplication(attemptId.getApplicationId(),"queue2");
// After the move: still not runnable anywhere in the hierarchy.
assertEquals(0,sourceQueue.getNumRunnableApps());
assertEquals(0,destQueue.getNumRunnableApps());
assertEquals(0,queueManager.getRootQueue().getNumRunnableApps());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * findLowestCommonAncestorQueue of two siblings under a non-root parent
 * (root.queue1) must return that parent, not root.
 *
 * Fix: the assertEquals arguments were reversed — JUnit's contract is
 * (expected, actual), so a failure previously reported the values backwards.
 */
@Test public void testLowestCommonAncestorForNonRootParent() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Two mocked leaves attached under root.queue1.
FSLeafQueue aQueue=mock(FSLeafQueue.class);
FSLeafQueue bQueue=mock(FSLeafQueue.class);
when(aQueue.getName()).thenReturn("root.queue1.a");
when(bQueue.getName()).thenReturn("root.queue1.b");
QueueManager queueManager=scheduler.getQueueManager();
FSParentQueue queue1=queueManager.getParentQueue("queue1",true);
queue1.addChildQueue(aQueue);
queue1.addChildQueue(bQueue);
FSQueue ancestorQueue=scheduler.findLowestCommonAncestorQueue(aQueue,bQueue);
// Expected value first: the common ancestor is root.queue1.
assertEquals(queue1,ancestorQueue);
}
InternalCallVerifier EqualityVerifier
/**
 * Submits node-local, rack-local and ANY requests spanning two racks and
 * verifies containers are allocated on both rack-1 nodes as their
 * heartbeats arrive. node3 (rack 2) never heartbeats.
 */
@Test(timeout=5000) public void testMultipleNodesSingleRackRequest() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1");
RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2");
// node3 lives on a different rack and is never registered or updated.
RMNode node3=MockNodes.newNodeInfo(2,Resources.createResource(1024),3,"127.0.0.3");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2);
scheduler.handle(nodeEvent2);
ApplicationAttemptId appId=createAppAttemptId(this.APP_ID++,this.ATTEMPT_ID++);
scheduler.addApplication(appId.getApplicationId(),"queue1","user1",false);
scheduler.addApplicationAttempt(appId,false,false);
// Parameterized lists restored (raw types had been stripped).
List<ResourceRequest> asks=new ArrayList<ResourceRequest>();
asks.add(createResourceRequest(1024,node1.getHostName(),1,1,true));
asks.add(createResourceRequest(1024,node2.getHostName(),1,1,true));
asks.add(createResourceRequest(1024,node3.getHostName(),1,1,true));
asks.add(createResourceRequest(1024,node1.getRackName(),1,1,true));
asks.add(createResourceRequest(1024,node3.getRackName(),1,1,true));
asks.add(createResourceRequest(1024,ResourceRequest.ANY,1,2,true));
scheduler.allocate(appId,asks,new ArrayList<ContainerId>(),null,null);
scheduler.update();
// First heartbeat from node1 yields the first container...
NodeUpdateSchedulerEvent updateEvent1=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent1);
assertEquals(1,scheduler.getSchedulerApp(appId).getLiveContainers().size());
scheduler.update();
// ...and node2's heartbeat yields the second.
NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2);
scheduler.handle(updateEvent2);
assertEquals(2,scheduler.getSchedulerApp(appId).getLiveContainers().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// With assign-multiple enabled and zero-memory containers allowed,
// maxAssign caps per-heartbeat allocations (2), and -1 means unlimited (8).
@Test(timeout=3000) public void testMaxAssignWithZeroMemoryContainers() throws Exception {
conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,true);
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,0);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16384,16),0,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
// Eight 0 MB / 1 vcore containers requested.
ApplicationAttemptId attId=createSchedulingRequest(0,1,"root.default","user",8);
FSAppAttempt app=scheduler.getSchedulerApp(attId);
scheduler.maxAssign=2;
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",2,app.getLiveContainers().size());
// -1 disables the cap: the remaining requests are satisfied in one pass.
scheduler.maxAssign=-1;
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Incorrect number of containers allocated",8,app.getLiveContainers().size());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * Parses a hierarchical allocation file (queueA, plus queueB containing
 * queueC and queueD) and checks exactly four leaf queues exist.
 *
 * NOTE(review): the XML literals below were reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAXException, AllocationConfigurationException, ParserConfigurationException {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<queue name=\"queueA\">");
out.println("<minResources>2048mb,0vcores</minResources>");
out.println("</queue>");
out.println("<queue name=\"queueB\">");
out.println("<minResources>2048mb,0vcores</minResources>");
out.println("<queue name=\"queueC\">");
out.println("<minResources>2048mb,0vcores</minResources>");
out.println("</queue>");
out.println("<queue name=\"queueD\">");
out.println("<minResources>2048mb,0vcores</minResources>");
out.println("</queue>");
out.println("</queue>");
out.println("</allocations>");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
Collection<FSLeafQueue> leafQueues=queueManager.getLeafQueues();
Assert.assertEquals(4,leafQueues.size());
Assert.assertNotNull(queueManager.getLeafQueue("queueA",false));
Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueC",false));
Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueD",false));
Assert.assertNotNull(queueManager.getLeafQueue("default",false));
// Re-check: the lookups above (create=false) must not add new queues.
Assert.assertEquals(4,leafQueues.size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// A 2048 MB request on a 1024 MB node must yield neither a container nor a
// reservation; a subsequent 1024 MB request is then satisfied normally.
@Test public void testReservationThatDoesntFit() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
ApplicationAttemptId attId=createSchedulingRequest(2048,"queue1","user1",1);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
FSAppAttempt app=scheduler.getSchedulerApp(attId);
// Request larger than the whole node: no allocation, no reservation.
assertEquals(0,app.getLiveContainers().size());
assertEquals(0,app.getReservedContainers().size());
createSchedulingRequestExistingApplication(1024,2,attId);
scheduler.update();
scheduler.handle(updateEvent);
// The smaller request fits and is allocated outright.
assertEquals(1,app.getLiveContainers().size());
assertEquals(0,app.getReservedContainers().size());
}
InternalCallVerifier EqualityVerifier
/**
 * Non-zero minimum allocation settings must be reflected verbatim in the
 * scheduler's minimum and increment resource capabilities.
 */
@Test public void testNonMinZeroResourcesSettings() throws IOException {
YarnConfiguration config=new YarnConfiguration();
config.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,256);
config.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,1);
config.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB,512);
config.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,2);
FairScheduler fairScheduler=new FairScheduler();
fairScheduler.init(config);
fairScheduler.reinitialize(config,null);
Resource minimum=fairScheduler.getMinimumResourceCapability();
Resource increment=fairScheduler.getIncrementResourceCapability();
Assert.assertEquals(256,minimum.getMemory());
Assert.assertEquals(1,minimum.getVirtualCores());
Assert.assertEquals(512,increment.getMemory());
Assert.assertEquals(2,increment.getVirtualCores());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// With user-as-default-queue enabled, a "default" submission lands in the
// user's queue, while an explicit queue name is honored as-is.
@Test public void testAssignToQueue() throws Exception {
conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"true");
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMApp rmApp1=new MockRMApp(0,0,RMAppState.NEW);
RMApp rmApp2=new MockRMApp(1,1,RMAppState.NEW);
FSLeafQueue queue1=scheduler.assignToQueue(rmApp1,"default","asterix");
FSLeafQueue queue2=scheduler.assignToQueue(rmApp2,"notdefault","obelix");
// The RMApp's queue field must be kept in sync with the assigned queue.
assertEquals(rmApp1.getQueue(),queue1.getName());
assertEquals("root.asterix",rmApp1.getQueue());
assertEquals(rmApp2.getQueue(),queue2.getName());
assertEquals("root.notdefault",rmApp2.getQueue());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * With the node full, a lower-priority request must be reserved (not a
 * higher-priority one); releasing the live container lets the reserved
 * priority-2 request take the freed resources.
 */
@Test(timeout=5000) public void testReservationWhileMultiplePriorities() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024,4),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
ApplicationAttemptId attId=createSchedulingRequest(1024,4,"queue1","user1",1,2);
scheduler.update();
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
FSAppAttempt app=scheduler.getSchedulerApp(attId);
assertEquals(1,app.getLiveContainers().size());
ContainerId containerId=scheduler.getSchedulerApp(attId).getLiveContainers().iterator().next().getContainerId();
// Node is now full: this priority-2 request can only be reserved.
createSchedulingRequestExistingApplication(1024,4,2,attId);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals(1,app.getLiveContainers().size());
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
createSchedulingRequestExistingApplication(1024,4,1,attId);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals(1,app.getLiveContainers().size());
// Only the lower (priority-2) request may hold the reservation.
for ( RMContainer container : app.getReservedContainers()) {
assertEquals(2,container.getReservedPriority().getPriority());
}
// Release the live container; parameterized list restored (was raw).
scheduler.allocate(attId,new ArrayList<ResourceRequest>(),Arrays.asList(containerId),null,null);
assertEquals(1024,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
scheduler.update();
scheduler.handle(updateEvent);
// The freed space goes to the reserved priority-2 request.
Collection<RMContainer> liveContainers=app.getLiveContainers();
assertEquals(1,liveContainers.size());
for ( RMContainer liveContainer : liveContainers) {
Assert.assertEquals(2,liveContainer.getContainer().getPriority().getPriority());
}
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB());
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// A queue that already exists as a parent (or as a leaf) cannot be created
// as a leaf (or parent) with a conflicting name; siblings are fine.
@Test public void testHierarchicalQueuesSimilarParents() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
FSLeafQueue leafQueue=queueManager.getLeafQueue("parent.child",true);
Assert.assertEquals(2,queueManager.getLeafQueues().size());
Assert.assertNotNull(leafQueue);
Assert.assertEquals("root.parent.child",leafQueue.getName());
// "parent" is already a parent queue, so it cannot become a leaf.
FSLeafQueue leafQueue2=queueManager.getLeafQueue("parent",true);
Assert.assertNull(leafQueue2);
Assert.assertEquals(2,queueManager.getLeafQueues().size());
// "parent.child" is a leaf, so no queue can be nested under it.
FSLeafQueue leafQueue3=queueManager.getLeafQueue("parent.child.grandchild",true);
Assert.assertNull(leafQueue3);
Assert.assertEquals(2,queueManager.getLeafQueues().size());
// A sibling leaf under the same parent is allowed.
FSLeafQueue leafQueue4=queueManager.getLeafQueue("parent.sister",true);
Assert.assertNotNull(leafQueue4);
Assert.assertEquals("root.parent.sister",leafQueue4.getName());
Assert.assertEquals(3,queueManager.getLeafQueues().size());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier
// Moving a runnable app must transfer its resource usage, demand, and
// runnable-app accounting from the source queue to the target queue.
@Test public void testMoveRunnableApp() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueMgr=scheduler.getQueueManager();
FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true);
FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true);
// Three 1024 MB / 1 vcore containers requested; one node of 1024 MB.
ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3);
ApplicationId appId=appAttId.getApplicationId();
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(1024));
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.handle(updateEvent);
assertEquals(Resource.newInstance(1024,1),oldQueue.getResourceUsage());
scheduler.update();
assertEquals(Resource.newInstance(3072,3),oldQueue.getDemand());
scheduler.moveApplication(appId,"queue2");
FSAppAttempt app=scheduler.getSchedulerApp(appAttId);
assertSame(targetQueue,app.getQueue());
assertFalse(oldQueue.getRunnableAppSchedulables().contains(app));
assertTrue(targetQueue.getRunnableAppSchedulables().contains(app));
// Usage and runnable-app counts follow the app.
assertEquals(Resource.newInstance(0,0),oldQueue.getResourceUsage());
assertEquals(Resource.newInstance(1024,1),targetQueue.getResourceUsage());
assertEquals(0,oldQueue.getNumRunnableApps());
assertEquals(1,targetQueue.getNumRunnableApps());
assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps());
scheduler.update();
// Demand is recomputed against the new queue on the next update.
assertEquals(Resource.newInstance(0,0),oldQueue.getDemand());
assertEquals(Resource.newInstance(3072,3),targetQueue.getDemand());
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
 * Explicitly configuring the root queue in the allocation file must apply
 * its scheduling policy (DRF) and create the declared child queues.
 *
 * NOTE(review): the XML literals below were reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testConfigureRootQueue() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<defaultQueueSchedulingPolicy>fair</defaultQueueSchedulingPolicy>");
out.println("<queue name=\"root\">");
out.println("  <schedulingPolicy>drf</schedulingPolicy>");
out.println("  <queue name=\"child1\">");
out.println("    <minResources>1024mb,1vcores</minResources>");
out.println("  </queue>");
out.println("  <queue name=\"child2\">");
out.println("    <minResources>1024mb,4vcores</minResources>");
out.println("  </queue>");
out.println("</queue>");
out.println("</allocations>");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
QueueManager queueManager=scheduler.getQueueManager();
FSQueue root=queueManager.getRootQueue();
// root's explicit drf overrides the fair default policy.
assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
assertNotNull(queueManager.getLeafQueue("child1",false));
assertNotNull(queueManager.getLeafQueue("child2",false));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Verifies the default queue maxAMShare behavior: queue1 (defaults) can run
 * its 2048 MB AM, while queue2 — despite maxAMShare 1.0 — cannot, because
 * its fair share is too small to cover the AM.
 *
 * NOTE(review): the XML literals below were reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testQueueMaxAMShareDefault() throws Exception {
conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
out.println("<?xml version=\"1.0\"?>");
out.println("<allocations>");
out.println("<queue name=\"queue1\">");
out.println("</queue>");
out.println("<queue name=\"queue2\">");
out.println("<maxAMShare>1.0</maxAMShare>");
out.println("</queue>");
out.println("<queue name=\"queue3\">");
out.println("</queue>");
out.println("<queue name=\"queue4\">");
out.println("</queue>");
out.println("<queue name=\"queue5\">");
out.println("</queue>");
out.println("</allocations>");
out.close();
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(8192,20),0,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node);
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node);
scheduler.handle(nodeEvent);
scheduler.update();
// No apps yet: every queue's fair share starts at zero.
FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true);
assertEquals("Queue queue1's fair share should be 0",0,queue1.getFairShare().getMemory());
FSLeafQueue queue2=scheduler.getQueueManager().getLeafQueue("queue2",true);
assertEquals("Queue queue2's fair share should be 0",0,queue2.getFairShare().getMemory());
FSLeafQueue queue3=scheduler.getQueueManager().getLeafQueue("queue3",true);
assertEquals("Queue queue3's fair share should be 0",0,queue3.getFairShare().getMemory());
FSLeafQueue queue4=scheduler.getQueueManager().getLeafQueue("queue4",true);
assertEquals("Queue queue4's fair share should be 0",0,queue4.getFairShare().getMemory());
FSLeafQueue queue5=scheduler.getQueueManager().getLeafQueue("queue5",true);
assertEquals("Queue queue5's fair share should be 0",0,queue5.getFairShare().getMemory());
// Activate several queues so fair shares are split among them.
List<String> queues=Arrays.asList("root.default","root.queue3","root.queue4","root.queue5");
for ( String queue : queues) {
createSchedulingRequest(1 * 1024,queue,"user1");
scheduler.update();
scheduler.handle(updateEvent);
}
Resource amResource1=Resource.newInstance(2048,1);
int amPriority=RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority();
ApplicationAttemptId attId1=createAppAttemptId(1,1);
createApplicationWithAMResource(attId1,"queue1","test1",amResource1);
createSchedulingRequestExistingApplication(2048,1,amPriority,attId1);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application1's AM requests 2048 MB memory",2048,app1.getAMResource().getMemory());
assertEquals("Application1's AM should be running",1,app1.getLiveContainers().size());
assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory());
ApplicationAttemptId attId2=createAppAttemptId(2,1);
createApplicationWithAMResource(attId2,"queue2","test1",amResource1);
createSchedulingRequestExistingApplication(2048,1,amPriority,attId2);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
scheduler.update();
scheduler.handle(updateEvent);
assertEquals("Application2's AM requests 2048 MB memory",2048,app2.getAMResource().getMemory());
assertEquals("Application2's AM should not be running",0,app2.getLiveContainers().size());
assertEquals("Queue2's AM resource usage should be 0 MB memory",0,queue2.getAmResourceUsage().getMemory());
}
InternalCallVerifier EqualityVerifier
/**
 * Zero minimum allocations must be accepted as-is while the configured
 * increment capabilities are still honored.
 */
@Test public void testMinZeroResourcesSettings() throws IOException {
YarnConfiguration config=new YarnConfiguration();
config.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,0);
config.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,0);
config.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB,512);
config.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES,2);
FairScheduler fairScheduler=new FairScheduler();
fairScheduler.init(config);
fairScheduler.reinitialize(config,null);
Resource minimum=fairScheduler.getMinimumResourceCapability();
Resource increment=fairScheduler.getIncrementResourceCapability();
Assert.assertEquals(0,minimum.getMemory());
Assert.assertEquals(0,minimum.getVirtualCores());
Assert.assertEquals(512,increment.getMemory());
Assert.assertEquals(2,increment.getVirtualCores());
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Submitting an app with an empty queue name must be rejected: no new
// queue, no scheduler app, and the RMApp is not registered.
@Test public void testEmptyQueueName() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// Only the default queue exists at startup.
assertEquals(1,scheduler.getQueueManager().getLeafQueues().size());
ApplicationAttemptId appAttemptId=createAppAttemptId(1,1);
AppAddedSchedulerEvent appAddedEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"","user1");
scheduler.handle(appAddedEvent);
assertEquals(1,scheduler.getQueueManager().getLeafQueues().size());
assertNull(scheduler.getSchedulerApp(appAttemptId));
assertEquals(0,resourceManager.getRMContext().getRMApps().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Under FIFO policy within a queue, the first submitted app must be fully
// satisfied before the second app receives any containers.
@Test(timeout=5000) public void testFifoWithinQueue() throws Exception {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(3072,3),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
// Two apps, each asking for two 1024 MB containers, in the same queue.
ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",2);
ApplicationAttemptId attId2=createSchedulingRequest(1024,"queue1","user1",2);
FSAppAttempt app1=scheduler.getSchedulerApp(attId1);
FSAppAttempt app2=scheduler.getSchedulerApp(attId2);
FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true);
queue1.setPolicy(new FifoPolicy());
scheduler.update();
// One container is assigned per heartbeat; app1 drains first.
NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1);
scheduler.handle(updateEvent);
assertEquals(1,app1.getLiveContainers().size());
assertEquals(0,app2.getLiveContainers().size());
scheduler.handle(updateEvent);
assertEquals(2,app1.getLiveContainers().size());
assertEquals(0,app2.getLiveContainers().size());
scheduler.handle(updateEvent);
assertEquals(2,app1.getLiveContainers().size());
assertEquals(1,app2.getLiveContainers().size());
}
InternalCallVerifier EqualityVerifier
/**
 * With three active leaves (default, parent.queue2, parent.queue3), the
 * instantaneous and steady fair shares split capacity/2 to default and
 * capacity/4 to each child of "parent".
 */
@Test public void testSimpleHierarchicalFairShareCalculation() throws IOException {
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,resourceManager.getRMContext());
// NOTE(review): capacity is 240, far below the 10*1024 requests; fair
// share math only depends on capacity — confirm this constant is intended.
int capacity=10 * 24;
RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(capacity),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1);
scheduler.handle(nodeEvent1);
createSchedulingRequest(10 * 1024,"parent.queue2","user1");
createSchedulingRequest(10 * 1024,"parent.queue3","user1");
createSchedulingRequest(10 * 1024,"root.default","user1");
scheduler.update();
scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource());
scheduler.getQueueManager().getRootQueue().recomputeSteadyShares();
QueueManager queueManager=scheduler.getQueueManager();
// Parameterized collection restored (raw type had been stripped).
Collection<FSLeafQueue> queues=queueManager.getLeafQueues();
assertEquals(3,queues.size());
FSLeafQueue queue1=queueManager.getLeafQueue("default",true);
FSLeafQueue queue2=queueManager.getLeafQueue("parent.queue2",true);
FSLeafQueue queue3=queueManager.getLeafQueue("parent.queue3",true);
// default shares root 50/50 with "parent"; parent splits between 2 and 3.
assertEquals(capacity / 2,queue1.getFairShare().getMemory());
assertEquals(capacity / 2,queue1.getMetrics().getFairShareMB());
assertEquals(capacity / 2,queue1.getSteadyFairShare().getMemory());
assertEquals(capacity / 2,queue1.getMetrics().getSteadyFairShareMB());
assertEquals(capacity / 4,queue2.getFairShare().getMemory());
assertEquals(capacity / 4,queue2.getMetrics().getFairShareMB());
assertEquals(capacity / 4,queue2.getSteadyFairShare().getMemory());
assertEquals(capacity / 4,queue2.getMetrics().getSteadyFairShareMB());
assertEquals(capacity / 4,queue3.getFairShare().getMemory());
assertEquals(capacity / 4,queue3.getMetrics().getFairShareMB());
assertEquals(capacity / 4,queue3.getSteadyFairShare().getMemory());
assertEquals(capacity / 4,queue3.getMetrics().getSteadyFairShareMB());
}
InternalCallVerifier BooleanVerifier
/**
 * Make sure the scheduler creates the event log.
 */
@Test public void testCreateEventLog() throws IOException {
FairSchedulerEventLog eventLog=scheduler.getEventLog();
// The scheduler picks the log file path; it must exist on disk.
logFile=new File(eventLog.getLogFile());
Assert.assertTrue(logFile.exists());
}
IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * DRF fair share across active queues under different parents: parentA's
 * two children get ~40% each, parentB's child ~10%; steady shares divide
 * the cluster across all configured leaves.
 */
@Test public void testFairShareWithDRFMultipleActiveQueuesUnderDifferentParent() throws IOException {
int nodeMem=16 * 1024;
int nodeVCores=10;
createClusterWithQueuesAndOneNode(nodeMem,nodeVCores,"drf");
createSchedulingRequest(2 * 1024,"root.parentA.childA1","user1");
createSchedulingRequest(3 * 1024,"root.parentA.childA2","user2");
createSchedulingRequest(1 * 1024,"root.parentB.childB1","user3");
createSchedulingRequest(1 * 1024,"root.default","user4");
scheduler.update();
for (int i=1; i <= 2; i++) {
assertEquals(40,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA" + i,false).getFairShare().getMemory() / nodeMem * 100,.9);
assertEquals(40,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA" + i,false).getFairShare().getVirtualCores() / nodeVCores * 100,.9);
}
assertEquals(10,(double)scheduler.getQueueManager().getLeafQueue("root.parentB.childB1",false).getFairShare().getMemory() / nodeMem * 100,.9);
assertEquals(10,(double)scheduler.getQueueManager().getLeafQueue("root.parentB.childB1",false).getFairShare().getVirtualCores() / nodeVCores * 100,.9);
// Parameterized collection restored: the raw Collection did not compile
// with the FSLeafQueue enhanced-for below.
Collection<FSLeafQueue> leafQueues=scheduler.getQueueManager().getLeafQueues();
for ( FSLeafQueue leaf : leafQueues) {
if (leaf.getName().startsWith("root.parentA")) {
assertEquals(0.2,(double)leaf.getSteadyFairShare().getMemory() / nodeMem,0.001);
assertEquals(0.2,(double)leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,0.001);
}
else if (leaf.getName().startsWith("root.parentB")) {
assertEquals(0.05,(double)leaf.getSteadyFairShare().getMemory() / nodeMem,0.001);
assertEquals(0.1,(double)leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,0.001);
}
}
}
BranchVerifier InternalCallVerifier EqualityVerifier
/**
 * With no apps running, every leaf's instantaneous fair share is zero,
 * while steady fair shares still divide the cluster memory.
 */
@Test public void testFairShareNoAppsRunning() throws IOException {
int nodeCapacity=16 * 1024;
createClusterWithQueuesAndOneNode(nodeCapacity,"fair");
scheduler.update();
// Parameterized collection restored: the raw Collection did not compile
// with the FSLeafQueue enhanced-for below.
Collection<FSLeafQueue> leafQueues=scheduler.getQueueManager().getLeafQueues();
for ( FSLeafQueue leaf : leafQueues) {
if (leaf.getName().startsWith("root.parentA")) {
assertEquals(0,(double)leaf.getFairShare().getMemory() / nodeCapacity,0);
}
else if (leaf.getName().startsWith("root.parentB")) {
assertEquals(0,(double)leaf.getFairShare().getMemory() / nodeCapacity,0);
}
}
verifySteadyFairShareMemory(leafQueues,nodeCapacity);
}
InternalCallVerifier EqualityVerifier
// Removing a runnable app must not promote a waiting app when the waiting
// app is still blocked by its own queue's maxApps limit.
@Test public void testRemoveDoesNotEnableAnyApp(){
FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1",true);
FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue2",true);
queueMaxApps.put("root",2);
queueMaxApps.put("root.queue1",1);
queueMaxApps.put("root.queue2",1);
FSAppAttempt app1=addApp(leaf1,"user");
addApp(leaf2,"user");
addApp(leaf2,"user");
assertEquals(1,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getNonRunnableAppSchedulables().size());
removeApp(app1);
// leaf2's waiting app stays non-runnable: leaf2 itself is still full.
assertEquals(0,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getNonRunnableAppSchedulables().size());
}
InternalCallVerifier EqualityVerifier
// Removing an app must free a slot under the shared ancestor's maxApps
// limit, enabling a waiting app on a cousin queue.
@Test public void testRemoveEnablesAppOnCousinQueue(){
FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.subqueue1.leaf1",true);
FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.subqueue2.leaf2",true);
queueMaxApps.put("root.queue1",2);
FSAppAttempt app1=addApp(leaf1,"user");
addApp(leaf2,"user");
addApp(leaf2,"user");
assertEquals(1,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getNonRunnableAppSchedulables().size());
removeApp(app1);
// The freed slot under root.queue1 goes to leaf2's waiting app.
assertEquals(0,leaf1.getRunnableAppSchedulables().size());
assertEquals(2,leaf2.getRunnableAppSchedulables().size());
assertEquals(0,leaf2.getNonRunnableAppSchedulables().size());
}
InternalCallVerifier EqualityVerifier
// Removing one app can enable two waiters at once: one blocked by the
// queue's maxApps and one blocked by the user's maxApps.
@Test public void testRemoveEnablesOneByQueueOneByUser(){
FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.leaf1",true);
FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.leaf2",true);
queueMaxApps.put("root.queue1.leaf1",2);
userMaxApps.put("user1",1);
FSAppAttempt app1=addApp(leaf1,"user1");
addApp(leaf1,"user2");
addApp(leaf1,"user3");
addApp(leaf2,"user1");
assertEquals(2,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf1.getNonRunnableAppSchedulables().size());
assertEquals(1,leaf2.getNonRunnableAppSchedulables().size());
removeApp(app1);
// leaf1's waiter fills the queue slot; user1's app on leaf2 fills the
// user slot — no non-runnable apps remain.
assertEquals(2,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getRunnableAppSchedulables().size());
assertEquals(0,leaf1.getNonRunnableAppSchedulables().size());
assertEquals(0,leaf2.getNonRunnableAppSchedulables().size());
}
InternalCallVerifier EqualityVerifier
// With several apps waiting on a cousin queue, removing one runnable app
// enables exactly one waiter (the ancestor limit admits only one).
@Test public void testMultipleAppsWaitingOnCousinQueue(){
FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.subqueue1.leaf1",true);
FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.subqueue2.leaf2",true);
queueMaxApps.put("root.queue1",2);
FSAppAttempt app1=addApp(leaf1,"user");
addApp(leaf2,"user");
addApp(leaf2,"user");
addApp(leaf2,"user");
assertEquals(1,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getRunnableAppSchedulables().size());
assertEquals(2,leaf2.getNonRunnableAppSchedulables().size());
removeApp(app1);
// Exactly one of leaf2's two waiters becomes runnable.
assertEquals(0,leaf1.getRunnableAppSchedulables().size());
assertEquals(2,leaf2.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getNonRunnableAppSchedulables().size());
}
InternalCallVerifier EqualityVerifier
// When multiple apps wait under the same ancestor limit, the earliest
// submitted waiter (by start time) is enabled first.
@Test public void testRemoveEnablingOrderedByStartTime(){
FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.subqueue1.leaf1",true);
FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.subqueue2.leaf2",true);
queueMaxApps.put("root.queue1",2);
FSAppAttempt app1=addApp(leaf1,"user");
addApp(leaf2,"user");
addApp(leaf2,"user");
// Advance the clock so the next app on leaf1 starts strictly later
// than leaf2's waiter.
clock.tick(20);
addApp(leaf1,"user");
assertEquals(1,leaf1.getRunnableAppSchedulables().size());
assertEquals(1,leaf2.getRunnableAppSchedulables().size());
assertEquals(1,leaf1.getNonRunnableAppSchedulables().size());
assertEquals(1,leaf2.getNonRunnableAppSchedulables().size());
removeApp(app1);
// leaf2's (older) waiter wins the freed slot, not leaf1's newer one.
assertEquals(0,leaf1.getRunnableAppSchedulables().size());
assertEquals(2,leaf2.getRunnableAppSchedulables().size());
assertEquals(0,leaf2.getNonRunnableAppSchedulables().size());
}
InternalCallVerifier NullVerifier
// Reloading the allocation config may convert a leaf into a parent (and
// back) only when the affected queue is empty; non-empty queues block the
// conversion.
@Test public void testReloadTurnsLeafQueueIntoParent() throws Exception {
updateConfiguredLeafQueues(queueManager,"queue1");
updateConfiguredLeafQueues(queueManager,"queue1.queue2");
// queue1 became a parent of queue2, so it is no longer a leaf.
assertNull(queueManager.getLeafQueue("queue1",false));
assertNotNull(queueManager.getLeafQueue("queue1.queue2",false));
updateConfiguredLeafQueues(queueManager,"queue1");
assertNull(queueManager.getLeafQueue("queue1.queue2",false));
assertNotNull(queueManager.getLeafQueue("queue1",false));
// A non-empty leaf cannot be turned into a parent on reload.
notEmptyQueues.add(queueManager.getLeafQueue("queue1",false));
updateConfiguredLeafQueues(queueManager,"queue1.queue2");
assertNull(queueManager.getLeafQueue("queue1.queue2",false));
assertNotNull(queueManager.getLeafQueue("queue1",false));
notEmptyQueues.clear();
updateConfiguredLeafQueues(queueManager,"queue1.queue2");
// A non-empty parent likewise cannot collapse back into a leaf.
notEmptyQueues.add(queueManager.getQueue("root.queue1"));
updateConfiguredLeafQueues(queueManager,"queue1");
assertNotNull(queueManager.getLeafQueue("queue1.queue2",false));
assertNull(queueManager.getLeafQueue("queue1",false));
// "default" is special and is never converted into a parent.
updateConfiguredLeafQueues(queueManager,"default.queue3");
assertNull(queueManager.getLeafQueue("default.queue3",false));
assertNotNull(queueManager.getLeafQueue("default",false));
}
InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// A leaf configured to become a parent (with no child leaves) converts
// only once it is empty; afterwards it exists as a childless parent.
@Test public void testReloadTurnsLeafToParentWithNoLeaf(){
AllocationConfiguration allocConf=new AllocationConfiguration(conf);
allocConf.configuredQueues.get(FSQueueType.LEAF).add("root.queue1");
queueManager.updateAllocationConfiguration(allocConf);
assertNotNull(queueManager.getLeafQueue("root.queue1",false));
// Mark the leaf non-empty so the first reload cannot convert it.
notEmptyQueues.add(queueManager.getLeafQueue("root.queue1",false));
allocConf=new AllocationConfiguration(conf);
allocConf.configuredQueues.get(FSQueueType.PARENT).add("root.queue1");
queueManager.updateAllocationConfiguration(allocConf);
assertNotNull(queueManager.getLeafQueue("root.queue1",false));
assertNull(queueManager.getParentQueue("root.queue1",false));
notEmptyQueues.clear();
// Once empty, the same reload succeeds and yields a childless parent.
queueManager.updateAllocationConfiguration(allocConf);
assertNull(queueManager.getLeafQueue("root.queue1",false));
assertNotNull(queueManager.getParentQueue("root.queue1",false));
assertTrue(queueManager.getParentQueue("root.queue1",false).getChildQueues().isEmpty());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A specified-then-reject policy honors an explicitly named queue and maps
 * everything else to null (rejected).
 *
 * NOTE(review): the policy XML below was reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testSpecifiedThenReject() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("<queuePlacementPolicy>");
sb.append("  <rule name='specified' />");
sb.append("  <rule name='reject' />");
sb.append("</queuePlacementPolicy>");
QueuePlacementPolicy policy=parse(sb.toString());
assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","someuser"));
// The reject rule maps unspecified apps to null.
assertNull(policy.assignAppToQueue("default","someuser"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * nestedUserQueue with primaryGroup create='false' only nests the user's
 * queue once the group parent queue already exists; otherwise the app
 * falls through to the default rule.
 *
 * NOTE(review): the policy XML below was reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testNestedUserQueuePrimaryGroupNoCreate() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("<queuePlacementPolicy>");
sb.append("  <rule name='nestedUserQueue'>");
sb.append("     <rule name='primaryGroup' create='false' />");
sb.append("  </rule>");
sb.append("  <rule name='default' />");
sb.append("</queuePlacementPolicy>");
QueuePlacementPolicy policy=parse(sb.toString());
// Parent root.user1group does not exist yet -> default rule wins.
assertEquals("root.default",policy.assignAppToQueue("root.default","user1"));
configuredQueues.get(FSQueueType.PARENT).add("root.user1group");
policy=parse(sb.toString());
// Now the group parent exists, so the user queue is nested under it.
assertEquals("root.user1group.user1",policy.assignAppToQueue("root.default","user1"));
sb=new StringBuffer();
sb.append("<queuePlacementPolicy>");
sb.append("  <rule name='nestedUserQueue'>");
sb.append("     <rule name='primaryGroup' create='false' />");
sb.append("  </rule>");
sb.append("  <rule name='default' />");
sb.append("</queuePlacementPolicy>");
// user2's group parent is not configured yet -> default.
assertEquals("root.default",policy.assignAppToQueue("root.default","user2"));
configuredQueues.get(FSQueueType.PARENT).add("root.user2group");
configuredQueues.get(FSQueueType.LEAF).add("root.user2group.user2");
policy=parse(sb.toString());
assertEquals("root.user2group.user2",policy.assignAppToQueue("root.default","user2"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A user rule with create=false only places apps into user queues that
 * already exist; otherwise placement falls through to the default rule.
 *
 * NOTE(review): the policy XML below was reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testNoCreate() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("<queuePlacementPolicy>");
sb.append("  <rule name='specified' />");
sb.append("  <rule name='user' create=\"false\" />");
sb.append("  <rule name='default' />");
sb.append("</queuePlacementPolicy>");
// Only "someuser" has a pre-existing user queue.
configuredQueues.get(FSQueueType.LEAF).add("root.someuser");
QueuePlacementPolicy policy=parse(sb.toString());
assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","someuser"));
assertEquals("root.someuser",policy.assignAppToQueue("default","someuser"));
assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","otheruser"));
// otheruser has no existing queue and create=false -> default rule.
assertEquals("root.default",policy.assignAppToQueue("default","otheruser"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * nestedUserQueue wrapping a specified rule nests the user's queue under
 * the explicitly requested (pre-configured) parent queue.
 *
 * NOTE(review): the policy XML below was reconstructed from markup that
 * had been stripped out of this file — verify against the original test.
 */
@Test public void testNestedUserQueueSpecificRule() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("<queuePlacementPolicy>");
sb.append("  <rule name='nestedUserQueue'>");
sb.append("     <rule name='specified' create='false' />");
sb.append("  </rule>");
sb.append("  <rule name='default' />");
sb.append("</queuePlacementPolicy>");
// Both requested parents must pre-exist (create='false').
configuredQueues.get(FSQueueType.PARENT).add("root.parent1");
configuredQueues.get(FSQueueType.PARENT).add("root.parent2");
QueuePlacementPolicy policy=parse(sb.toString());
assertEquals("root.parent1.user1",policy.assignAppToQueue("root.parent1","user1"));
assertEquals("root.parent2.user2",policy.assignAppToQueue("root.parent2","user2"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// "specified" rule followed by a user-based rule: a non-default requested
// queue wins; otherwise the app lands in a queue named after the user.
// NOTE(review): the sb.append(...) lines appear to have had their XML rule
// snippets stripped during extraction — confirm against the original source.
@Test public void testSpecifiedUserPolicy() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("");
sb.append(" ");
sb.append(" ");
sb.append(" ");
QueuePlacementPolicy policy=parse(sb.toString());
assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","someuser"));
// Requesting "default" falls through to the per-user placement.
assertEquals("root.someuser",policy.assignAppToQueue("default","someuser"));
assertEquals("root.otheruser",policy.assignAppToQueue("default","otheruser"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Nested user queue under the user's primary group, plus interaction with
// pre-existing leaf queues.
// NOTE(review): the sb.append(...) lines appear to have had their XML rule
// snippets stripped during extraction — confirm against the original source.
@Test public void testNestedUserQueuePrimaryGroup() throws Exception {
StringBuffer sb=new StringBuffer();
sb.append("");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
sb.append(" ");
QueuePlacementPolicy policy=parse(sb.toString());
// user1 is placed under a queue named for its primary group.
assertEquals("root.user1group.user1",policy.assignAppToQueue("root.default","user1"));
configuredQueues.get(FSQueueType.LEAF).add("root.specifiedq");
// A specifically requested existing leaf queue is honored for user2.
assertEquals("root.specifiedq",policy.assignAppToQueue("root.specifiedq","user2"));
configuredQueues.get(FSQueueType.LEAF).add("root.user3group");
// NOTE(review): presumably user3 falls through to default because
// root.user3group exists as a LEAF, so it cannot act as a nesting parent —
// confirm against QueuePlacementRule semantics.
assertEquals("root.default",policy.assignAppToQueue("root.default","user3"));
}
InternalCallVerifier BooleanVerifier
/**
 * Trivial tests that make sure {@link SchedulingPolicy#isApplicableTo(SchedulingPolicy,byte)}
 * works as expected for the possible values of depth.
 * @throws AllocationConfigurationException
 */
@Test(timeout=1000) public void testIsApplicableTo() throws AllocationConfigurationException {
  final String ERR = "Broken SchedulingPolicy#isApplicableTo";
  // FIFO applies at the leaf level only. Reuse a single parsed instance for
  // all three checks instead of re-parsing "fifo" for each one, consistent
  // with how the fair/drf cases below are written.
  SchedulingPolicy policy = SchedulingPolicy.parse("fifo");
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_LEAF));
  assertFalse(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertFalse(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ROOT));
  // Fair applies at every depth.
  policy = SchedulingPolicy.parse("fair");
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_LEAF));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ROOT));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_PARENT));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ANY));
  // DRF applies at every depth.
  policy = SchedulingPolicy.parse("drf");
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_LEAF));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ROOT));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_PARENT));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ANY));
  // A policy restricted to DEPTH_PARENT applies to intermediate, root and
  // parent levels, but not to DEPTH_ANY.
  policy = Mockito.mock(SchedulingPolicy.class);
  Mockito.when(policy.getApplicableDepth()).thenReturn(SchedulingPolicy.DEPTH_PARENT);
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_INTERMEDIATE));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ROOT));
  assertTrue(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_PARENT));
  assertFalse(ERR, SchedulingPolicy.isApplicableTo(policy, SchedulingPolicy.DEPTH_ANY));
}
InternalCallVerifier EqualityVerifier
/**
 * calculateShares: with 10/100 memory and 5/10 CPU consumed, the memory
 * share is 0.1, the CPU share is 0.5, and CPU is the dominant resource.
 */
@Test public void testCalculateShares(){
  DominantResourceFairnessPolicy.DominantResourceFairnessComparator cmp =
      new DominantResourceFairnessPolicy.DominantResourceFairnessComparator();
  Resource consumed = Resources.createResource(10, 5);
  Resource clusterCapacity = Resources.createResource(100, 10);
  ResourceWeights computedShares = new ResourceWeights();
  ResourceType[] dominanceOrder = new ResourceType[2];
  cmp.calculateShares(consumed, clusterCapacity, computedShares, dominanceOrder, ResourceWeights.NEUTRAL);
  assertEquals(.1, computedShares.getWeight(ResourceType.MEMORY), .00001);
  assertEquals(.5, computedShares.getWeight(ResourceType.CPU), .00001);
  // CPU has the larger share, so it comes first in the dominance order.
  assertEquals(ResourceType.CPU, dominanceOrder[0]);
  assertEquals(ResourceType.MEMORY, dominanceOrder[1]);
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
/**
 * getAppsInQueue returns exactly the attempts submitted to that queue, and
 * null for a queue that does not exist.
 */
@Test public void testGetAppsInQueue() throws Exception {
  Application firstApp = new Application("user_0", resourceManager);
  firstApp.submit();
  Application secondApp = new Application("user_0", resourceManager);
  secondApp.submit();
  ResourceScheduler scheduler = resourceManager.getResourceScheduler();
  List defaultQueueApps = scheduler.getAppsInQueue("default");
  assertEquals(2, defaultQueueApps.size());
  assertTrue(defaultQueueApps.contains(firstApp.getApplicationAttemptId()));
  assertTrue(defaultQueueApps.contains(secondApp.getApplicationAttemptId()));
  // Unknown queue -> null rather than an empty list.
  Assert.assertNull(scheduler.getAppsInQueue("someotherqueue"));
}
InternalCallVerifier EqualityVerifier
// A second attempt for the same application must not increment the
// appsSubmitted queue metric (only new applications count as submissions).
@Test(timeout=5000) public void testAppAttemptMetrics() throws Exception {
AsyncDispatcher dispatcher=new InlineDispatcher();
RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
RMContext rmContext=new RMContextImpl(dispatcher,null,null,null,null,null,null,null,null,writer);
FifoScheduler scheduler=new FifoScheduler();
Configuration conf=new Configuration();
scheduler.setRMContext(rmContext);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(conf,rmContext);
QueueMetrics metrics=scheduler.getRootQueueMetrics();
// Snapshot the metric first so the assertion is relative, not absolute.
int beforeAppsSubmitted=metrics.getAppsSubmitted();
ApplicationId appId=BuilderUtils.newApplicationId(200,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId,"queue","user");
scheduler.handle(appEvent);
SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
scheduler.handle(attemptEvent);
// Second attempt of the same application.
appAttemptId=BuilderUtils.newApplicationAttemptId(appId,2);
SchedulerEvent attemptEvent2=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
scheduler.handle(attemptEvent2);
int afterAppsSubmitted=metrics.getAppsSubmitted();
// One application, two attempts -> exactly one new submission counted.
Assert.assertEquals(1,afterAppsSubmitted - beforeAppsSubmitted);
scheduler.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier
// Blacklist handling in FifoScheduler#allocate: passing a host in the
// blacklist-additions argument marks it blacklisted for the attempt, and
// passing it in the removals argument clears the flag again.
@SuppressWarnings("resource") @Test public void testBlackListNodes() throws Exception {
Configuration conf=new Configuration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class);
MockRM rm=new MockRM(conf);
rm.start();
FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler();
String host="127.0.0.1";
RMNode node=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,host);
fs.handle(new NodeAddedSchedulerEvent(node));
ApplicationId appId=BuilderUtils.newApplicationId(100,1);
ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1);
SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId,"default","user");
fs.handle(appEvent);
SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
fs.handle(attemptEvent);
// 4th argument (additions) contains the host -> it becomes blacklisted.
fs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null);
Assert.assertTrue(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
// 5th argument (removals) contains the host -> the entry is cleared.
fs.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host));
Assert.assertFalse(fs.getApplicationAttempt(appAttemptId).isBlacklisted(host));
rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Updating a node's total resource via ResourceOption must only be seen by
 * the scheduler after the next NODE_UPDATE event, and queue capacity must be
 * computed against the updated node size.
 */
@Test(timeout=2000) public void testUpdateResourceOnNode() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  Configuration conf = new Configuration();
  RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf);
  containerTokenSecretManager.rollMasterKey();
  NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf);
  nmTokenSecretManager.rollMasterKey();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, writer);
  // Anonymous subclass exposes the protected 'nodes' map for inspection.
  FifoScheduler scheduler = new FifoScheduler() {
    @SuppressWarnings("unused") public Map getNodes() {
      return nodes;
    }
  };
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(new Configuration(), rmContext);
  RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 4), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
  scheduler.handle(nodeEvent1);
  // The subclass-only getNodes() is reachable solely via reflection.
  Method method = scheduler.getClass().getDeclaredMethod("getNodes");
  @SuppressWarnings("unchecked") Map schedulerNodes = (Map) method.invoke(scheduler);
  // Fixed: JUnit assertEquals takes (expected, actual); the original had the
  // arguments swapped on this and the next three asserts, which produces
  // misleading failure messages.
  assertEquals(1, schedulerNodes.values().size());
  // Shrink the node from 2048MB to 1024MB; the scheduler must not observe
  // the change until the next NODE_UPDATE.
  node0.setResourceOption(ResourceOption.newInstance(Resources.createResource(1024, 4), RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
  assertEquals(1024, node0.getTotalCapability().getMemory());
  assertEquals(2048, schedulerNodes.get(node0.getNodeID()).getAvailableResource().getMemory());
  NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
  scheduler.handle(node0Update);
  assertEquals(1024, schedulerNodes.get(node0.getNodeID()).getAvailableResource().getMemory());
  QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
  int _appId = 1;
  int _appAttemptId = 1;
  ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId);
  AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(appAttemptId.getApplicationId(), "queue1", "user1");
  scheduler.handle(appEvent);
  AppAttemptAddedSchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  int memory = 1024;
  int priority = 1;
  // Ask for one container at node, rack and ANY level.
  List ask = new ArrayList();
  ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, 1);
  ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, 1);
  ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, 1);
  ask.add(nodeLocal);
  ask.add(rackLocal);
  ask.add(any);
  scheduler.allocate(appAttemptId, ask, new ArrayList(), null, null);
  Assert.assertEquals(1, nodeLocal.getNumContainers());
  // The node heartbeat triggers the actual container assignment.
  scheduler.handle(node0Update);
  Assert.assertEquals(0, nodeLocal.getNumContainers());
  SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
  Assert.assertEquals(1, info.getLiveContainers().size());
  // 1024MB allocated out of the shrunken 1024MB node -> fully used.
  queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f);
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// With a single registered node and matching node/rack/ANY requests, all
// containers should be assigned node-locally on the first node heartbeat.
@Test(timeout=2000) public void testNodeLocalAssignment() throws Exception {
AsyncDispatcher dispatcher=new InlineDispatcher();
Configuration conf=new Configuration();
RMContainerTokenSecretManager containerTokenSecretManager=new RMContainerTokenSecretManager(conf);
containerTokenSecretManager.rollMasterKey();
NMTokenSecretManagerInRM nmTokenSecretManager=new NMTokenSecretManagerInRM(conf);
nmTokenSecretManager.rollMasterKey();
RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class);
RMContext rmContext=new RMContextImpl(dispatcher,null,null,null,null,null,containerTokenSecretManager,nmTokenSecretManager,null,writer);
FifoScheduler scheduler=new FifoScheduler();
scheduler.setRMContext(rmContext);
scheduler.init(conf);
scheduler.start();
scheduler.reinitialize(new Configuration(),rmContext);
// One large node (1024*64 MB) so all requested containers fit on it.
RMNode node0=MockNodes.newNodeInfo(1,Resources.createResource(1024 * 64),1,"127.0.0.1");
NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node0);
scheduler.handle(nodeEvent1);
int _appId=1;
int _appAttemptId=1;
ApplicationAttemptId appAttemptId=createAppAttemptId(_appId,_appAttemptId);
AppAddedSchedulerEvent appEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"queue1","user1");
scheduler.handle(appEvent);
AppAttemptAddedSchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false);
scheduler.handle(attemptEvent);
int memory=64;
int nConts=3;
int priority=20;
// The same three containers requested at node, rack and ANY level.
List ask=new ArrayList();
ResourceRequest nodeLocal=createResourceRequest(memory,node0.getHostName(),priority,nConts);
ResourceRequest rackLocal=createResourceRequest(memory,node0.getRackName(),priority,nConts);
ResourceRequest any=createResourceRequest(memory,ResourceRequest.ANY,priority,nConts);
ask.add(nodeLocal);
ask.add(rackLocal);
ask.add(any);
scheduler.allocate(appAttemptId,ask,new ArrayList(),null,null);
NodeUpdateSchedulerEvent node0Update=new NodeUpdateSchedulerEvent(node0);
// Before the heartbeat the node-local ask is still fully outstanding.
Assert.assertEquals(3,nodeLocal.getNumContainers());
scheduler.handle(node0Update);
// The heartbeat triggers assignment; the outstanding count drops to zero...
Assert.assertEquals(0,nodeLocal.getNumContainers());
SchedulerAppReport info=scheduler.getSchedulerAppInfo(appAttemptId);
// ...and all three containers are live for the attempt.
Assert.assertEquals(3,info.getLiveContainers().size());
scheduler.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * A freshly constructed FifoScheduler with no NodeManagers registered must
 * report zero current capacity for the root queue.
 */
@Test(timeout=5000) public void testFifoSchedulerCapacityWhenNoNMs(){
  QueueInfo rootQueueInfo = new FifoScheduler().getQueueInfo(null, false, false);
  Assert.assertEquals(0.0f, rootQueueInfo.getCurrentCapacity(), 0.0f);
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
* Validate master-key-roll-over and that tokens are usable even after
* master-key-roll-over.
* @throws Exception
*/
@Test public void testMasterKeyRollOver() throws Exception {
conf.setLong(YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,rolling_interval_sec);
conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,am_expire_ms);
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager);
rm.start();
Long startTime=System.currentTimeMillis();
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
ApplicationMasterProtocol rmClient=null;
AMRMTokenSecretManager appTokenSecretManager=rm.getRMContext().getAMRMTokenSecretManager();
MasterKeyData oldKey=appTokenSecretManager.getMasterKey();
Assert.assertNotNull(oldKey);
try {
MockNM nm1=rm.registerNode("localhost:1234",5120);
RMApp app=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < maxWaitAttempts) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
rmClient.registerApplicationMaster(request);
AllocateRequest allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) {
rmClient.allocate(allocateRequest);
Thread.sleep(500);
}
MasterKeyData newKey=appTokenSecretManager.getMasterKey();
Assert.assertNotNull(newKey);
Assert.assertFalse("Master key should have changed!",oldKey.equals(newKey));
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
waitCount=0;
while (waitCount++ <= maxWaitAttempts) {
if (appTokenSecretManager.getCurrnetMasterKeyData() != oldKey) {
break;
}
try {
rmClient.allocate(allocateRequest);
}
catch ( Exception ex) {
break;
}
Thread.sleep(200);
}
Assert.assertTrue(appTokenSecretManager.getCurrnetMasterKeyData().equals(newKey));
Assert.assertTrue(appTokenSecretManager.getMasterKey().equals(newKey));
Assert.assertTrue(appTokenSecretManager.getNextMasterKeyData() == null);
Token newToken=appTokenSecretManager.createAndGetAMRMToken(applicationAttemptId);
SecurityUtil.setTokenService(newToken,rmBindAddress);
currentUser.addToken(newToken);
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
rpc.stopProxy(rmClient,conf);
try {
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
allocateRequest=Records.newRecord(AllocateRequest.class);
Assert.assertTrue(rmClient.allocate(allocateRequest).getAMCommand() == null);
Assert.fail("The old Token should not work");
}
catch ( Exception ex) {
}
}
finally {
rm.stop();
if (rmClient != null) {
rpc.stopProxy(rmClient,conf);
}
}
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// AMRMToken roll-over as seen by a running AM: no token is attached to
// normal allocate responses, a new token is handed out exactly once after
// rollMasterKey(), and nothing further after the next key is activated.
@Test(timeout=20000) public void testAMRMMasterKeysUpdate() throws Exception {
MockRM rm=new MockRM(conf){
// Skip real secure login for the test.
@Override protected void doSecureLogin() throws IOException {
}
}
;
rm.start();
MockNM nm=rm.registerNode("127.0.0.1:1234",8000);
RMApp app=rm.submitApp(200);
MockAM am=MockRM.launchAndRegisterAM(app,rm,nm);
// Steady state: no AMRMToken piggybacked on the allocate response.
AllocateResponse response=am.allocate(Records.newRecord(AllocateRequest.class));
Assert.assertNull(response.getAMRMToken());
rm.getRMContext().getAMRMTokenSecretManager().rollMasterKey();
// First allocate after the roll carries the new token...
response=am.allocate(Records.newRecord(AllocateRequest.class));
Assert.assertNotNull(response.getAMRMToken());
Token amrmToken=ConverterUtils.convertFromYarn(response.getAMRMToken(),new Text(response.getAMRMToken().getService()));
// ...and that token was minted with the current master key's id.
Assert.assertEquals(amrmToken.decodeIdentifier().getKeyId(),rm.getRMContext().getAMRMTokenSecretManager().getMasterKey().getMasterKey().getKeyId());
// The token is only sent once: the next allocate carries none.
response=am.allocate(Records.newRecord(AllocateRequest.class));
Assert.assertNull(response.getAMRMToken());
rm.getRMContext().getAMRMTokenSecretManager().activateNextMasterKey();
// Activating the next key does not push another token either.
response=am.allocate(Records.newRecord(AllocateRequest.class));
Assert.assertNull(response.getAMRMToken());
rm.stop();
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
/**
* Validate that application tokens are unusable after the
* application-finishes.
* @throws Exception
*/
@SuppressWarnings("unchecked") @Test public void testTokenExpiry() throws Exception {
MyContainerManager containerManager=new MyContainerManager();
final MockRMWithAMS rm=new MockRMWithAMS(conf,containerManager);
rm.start();
final Configuration conf=rm.getConfig();
final YarnRPC rpc=YarnRPC.create(conf);
ApplicationMasterProtocol rmClient=null;
try {
MockNM nm1=rm.registerNode("localhost:1234",5120);
RMApp app=rm.submitApp(1024);
nm1.nodeHeartbeat(true);
int waitCount=0;
while (containerManager.containerTokens == null && waitCount++ < 20) {
LOG.info("Waiting for AM Launch to happen..");
Thread.sleep(1000);
}
Assert.assertNotNull(containerManager.containerTokens);
RMAppAttempt attempt=app.getCurrentAppAttempt();
ApplicationAttemptId applicationAttemptId=attempt.getAppAttemptId();
UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
Credentials credentials=containerManager.getContainerCredentials();
final InetSocketAddress rmBindAddress=rm.getApplicationMasterService().getBindAddress();
Token extends TokenIdentifier> amRMToken=MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,credentials.getAllTokens());
currentUser.addToken(amRMToken);
rmClient=createRMClient(rm,conf,rpc,currentUser);
RegisterApplicationMasterRequest request=Records.newRecord(RegisterApplicationMasterRequest.class);
rmClient.registerApplicationMaster(request);
FinishApplicationMasterRequest finishAMRequest=Records.newRecord(FinishApplicationMasterRequest.class);
finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
finishAMRequest.setDiagnostics("diagnostics");
finishAMRequest.setTrackingUrl("url");
rmClient.finishApplicationMaster(finishAMRequest);
ContainerStatus containerStatus=BuilderUtils.newContainerStatus(attempt.getMasterContainer().getId(),ContainerState.COMPLETE,"AM Container Finished",0);
rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppAttemptContainerFinishedEvent(applicationAttemptId,containerStatus));
int count=0;
while (attempt.getState() != RMAppAttemptState.FINISHED && count < maxWaitAttempts) {
Thread.sleep(100);
count++;
}
Assert.assertTrue(attempt.getState() == RMAppAttemptState.FINISHED);
rpc.stopProxy(rmClient,conf);
rmClient=createRMClient(rm,conf,rpc,currentUser);
AllocateRequest allocateRequest=Records.newRecord(AllocateRequest.class);
try {
rmClient.allocate(allocateRequest);
Assert.fail("You got to be kidding me! " + "Using App tokens after app-finish should fail!");
}
catch ( Throwable t) {
LOG.info("Exception found is ",t);
Assert.assertTrue(t.getCause().getMessage().contains(applicationAttemptId.toString() + " not found in AMRMTokenSecretManager."));
}
}
finally {
rm.stop();
if (rmClient != null) {
rpc.stopProxy(rmClient,conf);
}
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier HybridVerifier
// End-to-end check of client-to-AM tokens: an unauthenticated client must be
// rejected by the AM's RPC server before ping() executes, tampered tokens
// must fail verification, and the untouched token must succeed.
@Test public void testClientToAMTokens() throws Exception {
final Configuration conf=new Configuration();
// Kerberos auth so token-based authentication is actually enforced.
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
UserGroupInformation.setConfiguration(conf);
ContainerManagementProtocol containerManager=mock(ContainerManagementProtocol.class);
StartContainersResponse mockResponse=mock(StartContainersResponse.class);
when(containerManager.startContainers((StartContainersRequest)any())).thenReturn(mockResponse);
final DrainDispatcher dispatcher=new DrainDispatcher();
// RM with a mocked container manager, a drainable dispatcher, and secure
// login skipped for the test.
MockRM rm=new MockRMWithCustomAMLauncher(conf,containerManager){
protected ClientRMService createClientRMService(){
return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,getRMContext().getRMDelegationTokenSecretManager());
}
@Override protected Dispatcher createDispatcher(){
return dispatcher;
}
@Override protected void doSecureLogin() throws IOException {
}
}
;
rm.start();
RMApp app=rm.submitApp(1024);
MockNM nm1=rm.registerNode("localhost:1234",3072);
nm1.nodeHeartbeat(true);
dispatcher.await();
nm1.nodeHeartbeat(true);
dispatcher.await();
ApplicationAttemptId appAttempt=app.getCurrentAppAttempt().getAppAttemptId();
final MockAM mockAM=new MockAM(rm.getRMContext(),rm.getApplicationMasterService(),app.getCurrentAppAttempt().getAppAttemptId());
// Register the AM while running as the attempt's own user.
UserGroupInformation appUgi=UserGroupInformation.createRemoteUser(appAttempt.toString());
RegisterApplicationMasterResponse response=appUgi.doAs(new PrivilegedAction(){
@Override public RegisterApplicationMasterResponse run(){
RegisterApplicationMasterResponse response=null;
try {
response=mockAM.registerAppAttempt();
}
catch ( Exception e) {
Assert.fail("Exception was not expected");
}
return response;
}
}
);
// Fetch the client-to-AM token via the application report.
GetApplicationReportRequest request=Records.newRecord(GetApplicationReportRequest.class);
request.setApplicationId(app.getApplicationId());
GetApplicationReportResponse reportResponse=rm.getClientRMService().getApplicationReport(request);
ApplicationReport appReport=reportResponse.getApplicationReport();
org.apache.hadoop.yarn.api.records.Token originalClientToAMToken=appReport.getClientToAMToken();
// Registration must hand the AM a non-empty master key for verification.
Assert.assertNotNull(response.getClientToAMTokenMasterKey());
Assert.assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
ApplicationAttemptId appAttemptId=app.getAppAttempts().keySet().iterator().next();
Assert.assertNotNull(appAttemptId);
// Start a toy AM RPC service secured with that master key.
final CustomAM am=new CustomAM(appAttemptId,response.getClientToAMTokenMasterKey().array());
am.init(conf);
am.start();
SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
// An unauthenticated client must be rejected before reaching ping().
try {
CustomProtocol client=(CustomProtocol)RPC.getProxy(CustomProtocol.class,1L,am.address,conf);
client.ping();
fail("Access by unauthenticated user should fail!!");
}
catch ( Exception e) {
Assert.assertFalse(am.pinged);
}
Token token=ConverterUtils.convertFromYarn(originalClientToAMToken,am.address);
// Tampered tokens must fail; the valid token must succeed.
verifyTokenWithTamperedID(conf,am,token);
verifyTokenWithTamperedUserName(conf,am,token);
verifyValidToken(conf,am,token);
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
/**
 * An application submitted with an already-cancelled delegation token must be
 * rejected: the renewer's renewal attempt fails, producing an APP_REJECTED
 * event for the application.
 */
@Test(timeout=60000) public void testAppRejectionWithCancelledDelegationToken() throws Exception {
  MyFS dfs = (MyFS) FileSystem.get(conf);
  LOG.info("dfs=" + (Object) dfs.hashCode() + ";conf=" + conf.hashCode());
  MyToken token = dfs.getDelegationToken("user1");
  // Cancel up front so the renewer's first renewal attempt fails.
  token.cancelToken();
  Credentials ts = new Credentials();
  ts.addToken(token.getKind(), token);
  ApplicationId appId = BuilderUtils.newApplicationId(0, 0);
  delegationTokenRenewer.addApplicationAsync(appId, ts, true);
  // Poll the event queue (up to ~10s total) for the rejection event.
  int waitCnt = 20;
  while (waitCnt-- > 0) {
    if (!eventQueue.isEmpty()) {
      Event evt = eventQueue.take();
      if (evt.getType() == RMAppEventType.APP_REJECTED) {
        // Fixed: assertEquals reports both ids on failure, unlike
        // assertTrue(a.equals(b)).
        Assert.assertEquals(appId, ((RMAppEvent) evt).getApplicationId());
        return;
      }
    }
    else {
      Thread.sleep(500);
    }
  }
  fail("App submission with a cancelled token should have failed");
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * Basic idea of the test:
 * 1. create tokens.
 * 2. Mark one of them to be renewed in 2 seconds (instead of
 * 24 hours)
 * 3. register them for renewal
 * 4. sleep for 3 seconds
 * 5. count number of renewals (should be 3 initial ones + one extra)
 * 6. register another token for 2 seconds
 * 7. cancel it immediately
 * 8. Sleep and check that the 2 seconds renew didn't happen
 * (totally 5 renewals)
 * 9. check cancellation
 * @throws IOException
 * @throws URISyntaxException
 */
@Test(timeout=60000) public void testDTRenewal() throws Exception {
MyFS dfs=(MyFS)FileSystem.get(conf);
LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode());
MyToken token1, token2, token3;
token1=dfs.getDelegationToken("user1");
token2=dfs.getDelegationToken("user2");
token3=dfs.getDelegationToken("user3");
// token1 is flagged for ~2-second renewal by the test Renewer.
Renewer.tokenToRenewIn2Sec=token1;
LOG.info("token=" + token1 + " should be renewed for 2 secs");
String nn1=DelegationTokenRenewer.SCHEME + "://host1:0";
String nn2=DelegationTokenRenewer.SCHEME + "://host2:0";
String nn3=DelegationTokenRenewer.SCHEME + "://host3:0";
Credentials ts=new Credentials();
ts.addToken(new Text(nn1),token1);
ts.addToken(new Text(nn2),token2);
ts.addToken(new Text(nn3),token3);
ApplicationId applicationId_0=BuilderUtils.newApplicationId(0,0);
delegationTokenRenewer.addApplicationAsync(applicationId_0,ts,true);
waitForEventsToGetProcessed(delegationTokenRenewer);
// 3 initial renewals (one per token) plus the extra 2-second renewal.
int numberOfExpectedRenewals=3 + 1;
int attempts=10;
while (attempts-- > 0) {
try {
Thread.sleep(3 * 1000);
}
catch ( InterruptedException e) {
// keep polling; the surrounding loop bounds the total wait
}
if (Renewer.counter == numberOfExpectedRenewals) break;
}
LOG.info("dfs=" + dfs.hashCode() + ";Counter = "+ Renewer.counter+ ";t="+ Renewer.lastRenewed);
assertEquals("renew wasn't called as many times as expected(4):",numberOfExpectedRenewals,Renewer.counter);
assertEquals("most recently renewed token mismatch",Renewer.lastRenewed,token1);
// Second phase: register token4 for 2-second renewal, then finish the app
// immediately so the pending renewal is cancelled.
ts=new Credentials();
MyToken token4=dfs.getDelegationToken("user4");
Renewer.tokenToRenewIn2Sec=token4;
LOG.info("token=" + token4 + " should be renewed for 2 secs");
String nn4=DelegationTokenRenewer.SCHEME + "://host4:0";
ts.addToken(new Text(nn4),token4);
ApplicationId applicationId_1=BuilderUtils.newApplicationId(0,1);
delegationTokenRenewer.addApplicationAsync(applicationId_1,ts,true);
waitForEventsToGetProcessed(delegationTokenRenewer);
delegationTokenRenewer.applicationFinished(applicationId_1);
waitForEventsToGetProcessed(delegationTokenRenewer);
// Snapshot the counter: no renewal may happen after applicationFinished().
numberOfExpectedRenewals=Renewer.counter;
try {
Thread.sleep(6 * 1000);
}
catch ( InterruptedException e) {
// best-effort sleep; the assertions below still apply
}
LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed);
assertEquals("renew wasn't called as many times as expected",numberOfExpectedRenewals,Renewer.counter);
// The cancelled token must no longer be renewable.
try {
token4.renew(conf);
fail("Renewal of cancelled token should have failed");
}
catch ( InvalidToken ite) {
// expected: token4 was cancelled when the application finished
}
}
InternalCallVerifier EqualityVerifier
/**
 * On master-key roll the current RMDT master key must be persisted to the
 * state store, and an expired delegation token must eventually be removed
 * from the store.
 */
@Test(timeout=15000) public void testRMDTMasterKeyStateOnRollingMasterKey() throws Exception {
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  RMState rmState = memStore.getState();
  Map rmDTState = rmState.getRMDTSecretManagerState().getTokenState();
  Set rmDTMasterKeyState = rmState.getRMDTSecretManagerState().getMasterKeyState();
  MockRM rm1 = new MyMockRM(conf, memStore);
  rm1.start();
  RMDelegationTokenSecretManager dtSecretManager = rm1.getRMContext().getRMDelegationTokenSecretManager();
  // Keys created at startup must already be in the state store.
  Assert.assertEquals(dtSecretManager.getAllMasterKeys(), rmDTMasterKeyState);
  Set expiringKeys = new HashSet();
  expiringKeys.addAll(dtSecretManager.getAllMasterKeys());
  GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);
  when(request.getRenewer()).thenReturn("renewer1");
  GetDelegationTokenResponse response = rm1.getClientRMService().getDelegationToken(request);
  org.apache.hadoop.yarn.api.records.Token delegationToken = response.getRMDelegationToken();
  Token token1 = ConverterUtils.convertFromYarn(delegationToken, (Text) null);
  RMDelegationTokenIdentifier dtId1 = token1.decodeIdentifier();
  // Let the key roll a few times, checking each current key is persisted.
  while (((TestRMDelegationTokenSecretManager) dtSecretManager).numUpdatedKeys.get() < 3) {
    ((TestRMDelegationTokenSecretManager) dtSecretManager).checkCurrentKeyInStateStore(rmDTMasterKeyState);
    Thread.sleep(100);
  }
  // Wait (bounded) for the token to expire and be purged from the store.
  int count = 0;
  while (rmDTState.containsKey(dtId1) && count < 100) {
    Thread.sleep(100);
    count++;
  }
  // Fixed: the original loop verified nothing after the wait, so the test
  // passed even when the token was never removed.
  Assert.assertFalse("Expired token was not removed from the RM state store", rmDTState.containsKey(dtId1));
  rm1.stop();
}
InternalCallVerifier EqualityVerifier
/**
 * After the RM rolls its delegation-token master keys, every key that existed
 * at startup must eventually disappear from the RM state store. The wait is
 * bounded by the @Test timeout.
 */
@Test(timeout=15000) public void testRemoveExpiredMasterKeyInRMStateStore() throws Exception {
  MemoryRMStateStore stateStore = new MemoryRMStateStore();
  stateStore.init(conf);
  RMState state = stateStore.getState();
  Set persistedKeys = state.getRMDTSecretManagerState().getMasterKeyState();
  MockRM rm1 = new MyMockRM(conf, stateStore);
  rm1.start();
  RMDelegationTokenSecretManager secretManager = rm1.getRMContext().getRMDelegationTokenSecretManager();
  // Startup keys must already be persisted.
  Assert.assertEquals(secretManager.getAllMasterKeys(), persistedKeys);
  Set keysToExpire = new HashSet();
  keysToExpire.addAll(secretManager.getAllMasterKeys());
  // Poll until none of the startup keys remain in the store.
  boolean anyKeyStillStored = true;
  while (anyKeyStillStored) {
    anyKeyStillStored = false;
    for (DelegationKey key : keysToExpire) {
      if (persistedKeys.contains(key)) {
        anyKeyStillStored = true;
        break;
      }
    }
    if (anyKeyStillStored) {
      Thread.sleep(500);
    }
  }
}
InternalCallVerifier EqualityVerifier
// RmController#index should render the applications page: after index()
// runs, the controller's TITLE property is "Applications".
@Test public void testControllerIndex(){
Injector injector=WebAppTests.createMockInjector(TestRMWebApp.class,this,new Module(){
// Bind a real ApplicationACLsManager so the controller can be constructed.
@Override public void configure( Binder binder){
binder.bind(ApplicationACLsManager.class).toInstance(new ApplicationACLsManager(new Configuration()));
}
}
);
RmController c=injector.getInstance(RmController.class);
c.index();
assertEquals("Applications",c.get(TITLE,"unknown"));
}
InternalCallVerifier EqualityVerifier
/**
 * GET /ws/v1/cluster/info with an XML Accept header must return an XML
 * response whose body passes verifyClusterInfoXML.
 */
@Test public void testInfoXML() throws JSONException, Exception {
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("info")
      .accept("application/xml").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  verifyClusterInfoXML(response.getEntity(String.class));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/**
 * An unsupported Accept type (text/plain) on /ws/v1/cluster must produce an
 * internal-server-error response with an empty body.
 */
@Test public void testInvalidAccept() throws JSONException, Exception {
  String responseBody = "";
  try {
    responseBody = resource().path("ws").path("v1").path("cluster")
        .accept(MediaType.TEXT_PLAIN).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch (UniformInterfaceException ue) {
    assertEquals(Status.INTERNAL_SERVER_ERROR, ue.getResponse().getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", responseBody);
  }
}
InternalCallVerifier EqualityVerifier
/** GET /cluster with no Accept header: JSON is the default media type. */
@Test public void testClusterDefault() throws JSONException, Exception {
  ClientResponse clusterResponse = resource().path("ws").path("v1").path("cluster").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, clusterResponse.getType());
  verifyClusterInfo(clusterResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/metrics with no Accept header: JSON is the default media type. */
@Test public void testClusterMetricsDefault() throws JSONException, Exception {
  ClientResponse metricsResponse = resource().path("ws").path("v1").path("cluster").path("metrics").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, metricsResponse.getType());
  verifyClusterMetricsJSON(metricsResponse.getEntity(JSONObject.class));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** A request to the service root (no /ws/v1/cluster path) must 404 with an empty body. */
@Test public void testInvalidUri2() throws JSONException, Exception {
  String body = "";
  try {
    body = resource().accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
  }
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/scheduler as JSON and validate the FIFO scheduler payload. */
@Test public void testClusterSchedulerFifo() throws JSONException, Exception {
  ClientResponse schedulerResponse = resource().path("ws").path("v1").path("cluster").path("scheduler")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, schedulerResponse.getType());
  verifyClusterSchedulerFifo(schedulerResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** A trailing slash on /cluster/info must behave exactly like the slash-less form. */
@Test public void testInfoSlash() throws JSONException, Exception {
  ClientResponse infoResponse = resource().path("ws").path("v1").path("cluster").path("info/")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, infoResponse.getType());
  verifyClusterInfo(infoResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** A trailing slash on /cluster/scheduler must behave exactly like the slash-less form. */
@Test public void testClusterSchedulerFifoSlash() throws JSONException, Exception {
  ClientResponse schedulerResponse = resource().path("ws").path("v1").path("cluster").path("scheduler/")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, schedulerResponse.getType());
  verifyClusterSchedulerFifo(schedulerResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster with an explicit JSON Accept header and validate the cluster info. */
@Test public void testCluster() throws JSONException, Exception {
  ClientResponse clusterResponse = resource().path("ws").path("v1").path("cluster")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, clusterResponse.getType());
  verifyClusterInfo(clusterResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/info with no Accept header: JSON is the default media type. */
@Test public void testInfoDefault() throws JSONException, Exception {
  ClientResponse infoResponse = resource().path("ws").path("v1").path("cluster").path("info").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, infoResponse.getType());
  verifyClusterInfo(infoResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** A trailing slash on /cluster must behave exactly like the slash-less form. */
@Test public void testClusterSlash() throws JSONException, Exception {
  ClientResponse clusterResponse = resource().path("ws").path("v1").path("cluster/")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, clusterResponse.getType());
  verifyClusterInfo(clusterResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/metrics as JSON and validate the metrics payload. */
@Test public void testClusterMetrics() throws JSONException, Exception {
  ClientResponse metricsResponse = resource().path("ws").path("v1").path("cluster").path("metrics")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, metricsResponse.getType());
  verifyClusterMetricsJSON(metricsResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/info as JSON and validate the cluster info payload. */
@Test public void testInfo() throws JSONException, Exception {
  ClientResponse infoResponse = resource().path("ws").path("v1").path("cluster").path("info")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, infoResponse.getType());
  verifyClusterInfo(infoResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/scheduler with no Accept header: JSON is the default media type. */
@Test public void testClusterSchedulerFifoDefault() throws JSONException, Exception {
  ClientResponse schedulerResponse = resource().path("ws").path("v1").path("cluster").path("scheduler").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, schedulerResponse.getType());
  verifyClusterSchedulerFifo(schedulerResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** A trailing slash on /cluster/metrics must behave exactly like the slash-less form. */
@Test public void testClusterMetricsSlash() throws JSONException, Exception {
  ClientResponse metricsResponse = resource().path("ws").path("v1").path("cluster").path("metrics/")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, metricsResponse.getType());
  verifyClusterMetricsJSON(metricsResponse.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/metrics with an XML Accept header and validate the XML payload. */
@Test public void testClusterMetricsXML() throws JSONException, Exception {
  ClientResponse metricsResponse = resource().path("ws").path("v1").path("cluster").path("metrics")
      .accept("application/xml").get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, metricsResponse.getType());
  verifyClusterMetricsXML(metricsResponse.getEntity(String.class));
}
InternalCallVerifier EqualityVerifier
/** GET /cluster/scheduler with an XML Accept header and validate the XML payload. */
@Test public void testClusterSchedulerFifoXML() throws JSONException, Exception {
  ClientResponse schedulerResponse = resource().path("ws").path("v1").path("cluster").path("scheduler")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, schedulerResponse.getType());
  verifySchedulerFifoXML(schedulerResponse.getEntity(String.class));
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** An unknown path under /cluster must 404 with an empty body. */
@Test public void testInvalidUri() throws JSONException, Exception {
  String body = "";
  try {
    body = resource().path("ws").path("v1").path("cluster").path("bogus")
        .accept(MediaType.APPLICATION_JSON).get(String.class);
    fail("should have thrown exception on invalid uri");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
  }
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Queries /cluster/apps with one and then two "states" filters and checks
 * that the returned apps' states match the requested filters.
 */
@Test public void testAppsQueryStates() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  RMApp killedApp = rm.submitApp(CONTAINER_MB);
  rm.killApp(killedApp.getApplicationId());
  nm.nodeHeartbeat(true);
  // Single-state filter: only the ACCEPTED app should be returned.
  MultivaluedMapImpl singleState = new MultivaluedMapImpl();
  singleState.add("states", YarnApplicationState.ACCEPTED.toString());
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParams(singleState).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 1, appArray.length());
  assertEquals("state not equal to ACCEPTED", "ACCEPTED", appArray.getJSONObject(0).getString("state"));
  // Two-state filter: both the ACCEPTED and the KILLED app should be returned.
  MultivaluedMapImpl twoStates = new MultivaluedMapImpl();
  twoStates.add("states", YarnApplicationState.ACCEPTED.toString());
  twoStates.add("states", YarnApplicationState.KILLED.toString());
  resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParams(twoStates).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 2, appArray.length());
  String firstState = appArray.getJSONObject(0).getString("state");
  String secondState = appArray.getJSONObject(1).getString("state");
  assertTrue("both app states of ACCEPTED and KILLED are not present",
      (firstState.equals("ACCEPTED") && secondState.equals("KILLED"))
      || (firstState.equals("KILLED") && secondState.equals("ACCEPTED")));
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** A "states" filter matching no app must return a null "apps" element. */
@Test public void testAppsQueryStatesNone() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("states", YarnApplicationState.RUNNING.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  assertEquals("apps is not null", JSONObject.NULL, entity.get("apps"));
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** "startedTimeEnd" before any submission must return a null "apps" element. */
@Test public void testAppsQueryStartEnd() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234", 2048);
  long cutoff = System.currentTimeMillis();
  Thread.sleep(1); // ensure all apps start strictly after the cutoff
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("startedTimeEnd", String.valueOf(cutoff))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  assertEquals("apps is not null", JSONObject.NULL, entity.get("apps"));
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** "startedTimeBegin" before all submissions must return every submitted app. */
@Test public void testAppsQueryStartBegin() throws JSONException, Exception {
  rm.start();
  long cutoff = System.currentTimeMillis();
  Thread.sleep(1); // ensure all apps start strictly after the cutoff
  rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("startedTimeBegin", String.valueOf(cutoff))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 3, appArray.length());
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** The "limit" query parameter must cap the number of apps returned. */
@Test public void testAppsQueryLimit() throws JSONException, Exception {
  rm.start();
  rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("limit", "2").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 2, appArray.length());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Fetches multiple apps as XML and checks the document has one apps element with two app children. */
@Test public void testAppsXMLMulti() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
  rm.submitApp(2048, "testwordcount2", "user1");
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
  String xml = resp.getEntity(String.class);
  DocumentBuilder parser = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source = new InputSource();
  source.setCharacterStream(new StringReader(xml));
  Document dom = parser.parse(source);
  NodeList appsNodes = dom.getElementsByTagName("apps");
  assertEquals("incorrect number of elements", 1, appsNodes.getLength());
  NodeList appNodes = dom.getElementsByTagName("app");
  assertEquals("incorrect number of elements", 2, appNodes.getLength());
  rm.stop();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** Requesting an unknown application id must 404 with a NotFoundException payload. */
@Test public void testNonexistApp() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
  nm.nodeHeartbeat(true);
  try {
    resource().path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid appid");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.NOT_FOUND, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject remote = errorResponse.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remote.length());
    WebServicesTestUtils.checkStringMatch("exception message",
        "java.lang.Exception: app with id: application_00000_0099 not found", remote.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type", "NotFoundException", remote.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "org.apache.hadoop.yarn.webapp.NotFoundException", remote.getString("javaClassName"));
  }
  finally {
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
/** A "finalStatus" filter of UNDEFINED must match the still-running app. */
@Test public void testAppsQueryFinalStatus() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  System.out.println(entity.toString());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 1, appArray.length());
  verifyAppInfo(appArray.getJSONObject(0), app1);
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** "finishedTimeBegin" must only match the one app that has actually finished. */
@Test public void testAppsQueryFinishBegin() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  long cutoff = System.currentTimeMillis();
  Thread.sleep(1);
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  // Drive app1 through its attempt lifecycle so it gets a finish time.
  MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1, ContainerState.COMPLETE);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finishedTimeBegin", String.valueOf(cutoff))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 1, appArray.length());
  rm.stop();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** An unparseable "finalStatus" value must 400 with an IllegalArgumentException payload. */
@Test public void testAppsQueryFinalStatusInvalid() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  try {
    resource().path("ws").path("v1").path("cluster").path("apps")
        .queryParam("finalStatus", "INVALID_test")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid state query");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject remote = errorResponse.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remote.length());
    WebServicesTestUtils.checkStringContains("exception message",
        "org.apache.hadoop.yarn.api.records.FinalApplicationStatus.INVALID_test", remote.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type", "IllegalArgumentException", remote.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "java.lang.IllegalArgumentException", remote.getString("javaClassName"));
  }
  finally {
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** Fetches a single app as XML and validates the resulting document. */
@Test public void testSingleAppsXML() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .path(app1.getApplicationId().toString())
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
  String xml = resp.getEntity(String.class);
  DocumentBuilder parser = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source = new InputSource();
  source.setCharacterStream(new StringReader(xml));
  Document dom = parser.parse(source);
  NodeList appNodes = dom.getElementsByTagName("app");
  assertEquals("incorrect number of elements", 1, appNodes.getLength());
  verifyAppsXML(appNodes, app1);
  rm.stop();
}
IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Exercises the /cluster/appstatistics endpoint:
 * <ul>
 *   <li>no filter: one statItem per YarnApplicationState, all of type "*";</li>
 *   <li>"states" filter: a single statItem for the requested state;</li>
 *   <li>"applicationTypes" filter: per-state counts for that type only;</li>
 *   <li>more than one applicationType: BAD_REQUEST;</li>
 *   <li>combined states + applicationTypes filter;</li>
 *   <li>an unknown state value: BAD_REQUEST.</li>
 * </ul>
 * Submits 3 apps (2 MAPREDUCE, 1 OTHER) and drives one MAPREDUCE app to FINISHED,
 * so the expected counts are ACCEPTED=2, FINISHED=1, all other states 0.
 */
@Test public void testAppStatistics() throws JSONException, Exception {
  try {
    rm.start();
    MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",4096);
    Thread.sleep(1);
    RMApp app1=rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
    amNodeManager.nodeHeartbeat(true);
    // Finish app1's attempt so exactly one app reaches FINISHED.
    MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
    am.registerAppAttempt();
    am.unregisterAppAttempt();
    amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
    rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
    rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"OTHER");
    // 1) No filters: every application state is reported with type "*".
    WebResource r=resource();
    ClientResponse response=r.path("ws").path("v1").path("cluster").path("appstatistics").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    JSONObject json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    JSONObject appsStatInfo=json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements",1,appsStatInfo.length());
    JSONArray statItems=appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements",YarnApplicationState.values().length,statItems.length());
    for (int i=0; i < YarnApplicationState.values().length; ++i) {
      // FIX: index with i (was always getJSONObject(0), so only the first
      // statItem was ever checked and the loop was a no-op after pass one).
      JSONObject statItem=statItems.getJSONObject(i);
      assertEquals("*",statItem.getString("type"));
      if (statItem.getString("state").equals("ACCEPTED")) {
        assertEquals("2",statItem.getString("count"));
      }
      else if (statItem.getString("state").equals("FINISHED")) {
        assertEquals("1",statItem.getString("count"));
      }
      else {
        assertEquals("0",statItem.getString("count"));
      }
    }
    // 2) "states" filter: only the ACCEPTED statItem is returned.
    r=resource();
    response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("states",YarnApplicationState.ACCEPTED.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    appsStatInfo=json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements",1,appsStatInfo.length());
    statItems=appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements",1,statItems.length());
    assertEquals("ACCEPTED",statItems.getJSONObject(0).getString("state"));
    assertEquals("*",statItems.getJSONObject(0).getString("type"));
    assertEquals("2",statItems.getJSONObject(0).getString("count"));
    // 3) "applicationTypes" filter: counts restricted to MAPREDUCE apps.
    r=resource();
    response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    appsStatInfo=json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements",1,appsStatInfo.length());
    statItems=appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements",YarnApplicationState.values().length,statItems.length());
    for (int i=0; i < YarnApplicationState.values().length; ++i) {
      // FIX: same indexing bug as above — check every statItem, not just the first.
      JSONObject statItem=statItems.getJSONObject(i);
      assertEquals("mapreduce",statItem.getString("type"));
      if (statItem.getString("state").equals("ACCEPTED")) {
        assertEquals("1",statItem.getString("count"));
      }
      else if (statItem.getString("state").equals("FINISHED")) {
        assertEquals("1",statItem.getString("count"));
      }
      else {
        assertEquals("0",statItem.getString("count"));
      }
    }
    // 4) More than one applicationType is (currently) rejected with 400.
    r=resource();
    response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("applicationTypes","MAPREDUCE,OTHER").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    JSONObject exception=json.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements",3,exception.length());
    String message=exception.getString("message");
    String type=exception.getString("exception");
    String className=exception.getString("javaClassName");
    WebServicesTestUtils.checkStringContains("exception message","we temporarily support at most one applicationType",message);
    WebServicesTestUtils.checkStringEqual("exception type","BadRequestException",type);
    WebServicesTestUtils.checkStringEqual("exception className","org.apache.hadoop.yarn.webapp.BadRequestException",className);
    // 5) Combined filter: two states for a single applicationType.
    r=resource();
    response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("states",YarnApplicationState.FINISHED.toString() + "," + YarnApplicationState.ACCEPTED.toString()).queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    appsStatInfo=json.getJSONObject("appStatInfo");
    assertEquals("incorrect number of elements",1,appsStatInfo.length());
    statItems=appsStatInfo.getJSONArray("statItem");
    assertEquals("incorrect number of elements",2,statItems.length());
    JSONObject statItem1=statItems.getJSONObject(0);
    JSONObject statItem2=statItems.getJSONObject(1);
    assertTrue((statItem1.getString("state").equals("ACCEPTED") && statItem2.getString("state").equals("FINISHED")) || (statItem2.getString("state").equals("ACCEPTED") && statItem1.getString("state").equals("FINISHED")));
    assertEquals("mapreduce",statItem1.getString("type"));
    assertEquals("1",statItem1.getString("count"));
    assertEquals("mapreduce",statItem2.getString("type"));
    assertEquals("1",statItem2.getString("count"));
    // 6) An unknown state value is rejected with 400.
    r=resource();
    response=r.path("ws").path("v1").path("cluster").path("appstatistics").queryParam("states","wrong_state").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
    json=response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements",1,json.length());
    exception=json.getJSONObject("RemoteException");
    assertEquals("incorrect number of elements",3,exception.length());
    message=exception.getString("message");
    type=exception.getString("exception");
    className=exception.getString("javaClassName");
    WebServicesTestUtils.checkStringContains("exception message","Invalid application-state wrong_state",message);
    WebServicesTestUtils.checkStringEqual("exception type","BadRequestException",type);
    WebServicesTestUtils.checkStringEqual("exception className","org.apache.hadoop.yarn.webapp.BadRequestException",className);
  }
  finally {
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
/** The "queue" filter with the default queue must match both submitted apps. */
@Test public void testAppsQueryQueue() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("queue", "default").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 2, appArray.length());
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** A singular "state" filter matching no app must return a null "apps" element. */
@Test public void testAppsQueryStateNone() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("state", YarnApplicationState.RUNNING.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  assertEquals("apps is not null", JSONObject.NULL, entity.get("apps"));
  rm.stop();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** A malformed application id must 400 with a NumberFormatException payload. */
@Test public void testInvalidApp() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  try {
    resource().path("ws").path("v1").path("cluster").path("apps").path("application_invalid_12")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on invalid appid");
  }
  catch ( UniformInterfaceException ue) {
    ClientResponse errorResponse = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, errorResponse.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, errorResponse.getType());
    JSONObject remote = errorResponse.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, remote.length());
    WebServicesTestUtils.checkStringMatch("exception message", "For input string: \"invalid\"", remote.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type", "NumberFormatException", remote.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname", "java.lang.NumberFormatException", remote.getString("javaClassName"));
  }
  finally {
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** "finishedTimeEnd" after everything ran must match all three apps (unfinished apps included). */
@Test public void testAppsQueryFinishEnd() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  RMApp app1 = rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  // Drive app1 through its attempt lifecycle so it gets a finish time.
  MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
  am.registerAppAttempt();
  am.unregisterAppAttempt();
  nm.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1, ContainerState.COMPLETE);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  long cutoff = System.currentTimeMillis();
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("finishedTimeEnd", String.valueOf(cutoff))
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 3, appArray.length());
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** The "user" filter with the submitting user must match both submitted apps. */
@Test public void testAppsQueryUser() throws JSONException, Exception {
  rm.start();
  MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
  rm.submitApp(CONTAINER_MB);
  rm.submitApp(CONTAINER_MB);
  nm.nodeHeartbeat(true);
  ClientResponse resp = resource().path("ws").path("v1").path("cluster").path("apps")
      .queryParam("user", UserGroupInformation.getCurrentUser().getShortUserName())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  JSONObject entity = resp.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, entity.length());
  JSONObject appsObj = entity.getJSONObject("apps");
  assertEquals("incorrect number of elements", 1, appsObj.length());
  JSONArray appArray = appsObj.getJSONArray("app");
  assertEquals("incorrect number of elements", 2, appArray.length());
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that filtering GET /apps by startedTimeBegin AND startedTimeEnd
// returns only the two apps submitted inside the [start, end] window,
// excluding the third app submitted after 'end'.
@Test public void testAppsQueryStartBeginEnd() throws Exception {
  rm.start();
  try {
    rm.registerNode("127.0.0.1:1234", 2048);
    long start = System.currentTimeMillis();
    Thread.sleep(1); // submissions must be timestamped strictly after 'start'
    rm.submitApp(CONTAINER_MB);
    rm.submitApp(CONTAINER_MB);
    long end = System.currentTimeMillis();
    Thread.sleep(1); // the third app starts strictly after 'end'
    rm.submitApp(CONTAINER_MB);
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("startedTimeBegin", String.valueOf(start))
        .queryParam("startedTimeEnd", String.valueOf(end))
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject apps = json.getJSONObject("apps");
    assertEquals("incorrect number of elements", 1, apps.length());
    JSONArray array = apps.getJSONArray("app");
    assertEquals("incorrect number of elements", 2, array.length());
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies that GET /ws/v1/cluster/apps with an unknown value in the "states"
// query parameter is rejected with 400 BAD_REQUEST and a well-formed
// RemoteException JSON payload.
@Test public void testAppsQueryStatesInvalid() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
r.path("ws").path("v1").path("cluster").path("apps").queryParam("states","INVALID_test").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid state query");
}
catch ( UniformInterfaceException ue) {
// Jersey surfaces the non-2xx response as a UniformInterfaceException.
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// The RemoteException body carries exactly: message, exception, javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringContains("exception message","Invalid application-state INVALID_test",message);
WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname);
}
finally {
// Stop the RM regardless of outcome so subsequent tests start clean.
rm.stop();
}
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies that requesting an application with a malformed id
// ("application_invalid_12" - non-numeric cluster timestamp) yields
// 400 BAD_REQUEST with the NumberFormatException details in the body.
@Test public void testInvalidAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
r.path("ws").path("v1").path("cluster").path("apps").path("application_invalid_12").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid appid");
}
catch ( UniformInterfaceException ue) {
// Jersey surfaces the non-2xx response as a UniformInterfaceException.
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// The RemoteException body carries exactly: message, exception, javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","For input string: \"invalid\"",message);
WebServicesTestUtils.checkStringMatch("exception type","NumberFormatException",type);
WebServicesTestUtils.checkStringMatch("exception classname","java.lang.NumberFormatException",classname);
}
finally {
// Stop the RM regardless of outcome so subsequent tests start clean.
rm.stop();
}
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Exercises the "applicationTypes" filter of GET /ws/v1/cluster/apps across
// nine scenarios: single value, repeated parameter, comma-separated lists,
// empty values, and lists padded with blanks/extra commas. Three apps are
// submitted: one default ("YARN"), one "MAPREDUCE", one "NON-YARN".
@Test public void testAppsQueryAppTypes() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
Thread.sleep(1);
// First app is driven to completion so all three apps are queryable.
RMApp app1=rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
am.registerAppAttempt();
am.unregisterAppAttempt();
amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE);
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE");
rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"NON-YARN");
// Scenario 1: single type -> only the MAPREDUCE app matches.
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
JSONObject apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
JSONArray array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("MAPREDUCE",array.getJSONObject(0).getString("applicationType"));
// Scenario 2: repeated parameter -> union of YARN and MAPREDUCE (order not guaranteed).
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
// Scenario 3: comma-separated list in a single parameter value.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
// Scenario 4: empty value -> no filtering, all three apps returned.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
// Scenario 5: list plus repeated parameter -> union covers all three types.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
// Scenario 6: a real value plus an empty repeated parameter -> only YARN.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
// Scenario 7: blanks and extra commas around a single value are ignored.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, YARN ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",1,array.length());
assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
// Scenario 8: only blanks and commas -> treated as no filter, all apps.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",3,array.length());
// Scenario 9: mixed list with padding -> YARN and NON-YARN.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN, ,NON-YARN, ,,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
// Final scenario: padded values across repeated parameters -> YARN and MAPREDUCE.
r=resource();
response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes"," YARN, , ,,,").queryParam("applicationTypes","MAPREDUCE , ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
json=response.getEntity(JSONObject.class);
assertEquals("incorrect number of elements",1,json.length());
apps=json.getJSONObject("apps");
assertEquals("incorrect number of elements",1,apps.length());
array=apps.getJSONArray("app");
assertEquals("incorrect number of elements",2,array.length());
assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
rm.stop();
}
InternalCallVerifier EqualityVerifier
// Verifies that GET /apps?state=ACCEPTED returns exactly the one submitted
// application and that its serialized fields match the RMApp.
@Test public void testAppsQueryState() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    RMApp app1 = rm.submitApp(CONTAINER_MB);
    amNodeManager.nodeHeartbeat(true);
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("state", YarnApplicationState.ACCEPTED.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject apps = json.getJSONObject("apps");
    assertEquals("incorrect number of elements", 1, apps.length());
    JSONArray array = apps.getJSONArray("app");
    assertEquals("incorrect number of elements", 1, array.length());
    verifyAppInfo(array.getJSONObject(0), app1);
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that filtering GET /apps by finishedTimeBegin AND finishedTimeEnd
// returns only the single app that completed inside the window; the two apps
// submitted later never finish and are excluded.
@Test public void testAppsQueryFinishBeginEnd() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    long start = System.currentTimeMillis();
    Thread.sleep(1); // the finished app's timestamps lie strictly after 'start'
    RMApp app1 = rm.submitApp(CONTAINER_MB);
    amNodeManager.nodeHeartbeat(true);
    // Drive the first app's attempt through registration and completion.
    MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
    am.registerAppAttempt();
    am.unregisterAppAttempt();
    amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1, ContainerState.COMPLETE);
    rm.submitApp(CONTAINER_MB);
    rm.submitApp(CONTAINER_MB);
    long end = System.currentTimeMillis();
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("finishedTimeBegin", String.valueOf(start))
        .queryParam("finishedTimeEnd", String.valueOf(end))
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject apps = json.getJSONObject("apps");
    assertEquals("incorrect number of elements", 1, apps.length());
    JSONArray array = apps.getJSONArray("app");
    assertEquals("incorrect number of elements", 1, array.length());
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies that requesting a syntactically valid but unknown application id
// yields 404 NOT_FOUND with the NotFoundException details in the body.
@Test public void testNonexistAppAttempts() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB,"testwordcount","user1");
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
r.path("ws").path("v1").path("cluster").path("apps").path("application_00000_0099").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid appid");
}
catch ( UniformInterfaceException ue) {
// Jersey surfaces the non-2xx response as a UniformInterfaceException.
ClientResponse response=ue.getResponse();
assertEquals(Status.NOT_FOUND,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// The RemoteException body carries exactly: message, exception, javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id: application_00000_0099 not found",message);
WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname);
}
finally {
// Stop the RM regardless of outcome so subsequent tests start clean.
rm.stop();
}
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies the XML rendering of GET /apps/{appid}/appattempts: one
// <appAttempts> wrapper containing one <appAttempt>, whose contents match the
// current attempt of the submitted app.
@Test public void testAppAttemptsXML() throws Exception {
  rm.start();
  try {
    String user = "user1";
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", user);
    amNodeManager.nodeHeartbeat(true);
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .path(app1.getApplicationId().toString()).path("appattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    // Parse the response body into a DOM so individual elements can be checked.
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList nodes = dom.getElementsByTagName("appAttempts");
    assertEquals("incorrect number of elements", 1, nodes.getLength());
    NodeList attempt = dom.getElementsByTagName("appAttempt");
    assertEquals("incorrect number of elements", 1, attempt.getLength());
    verifyAppAttemptsXML(attempt, app1.getCurrentAppAttempt(), user);
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Drives an application through the maximum number of AM attempts by failing
// each attempt's container, then checks that the attempt count matches
// yarn.resourcemanager.am.max-attempts and that the appattempts REST resource
// reflects all of them.
@Test(timeout=20000) public void testMultipleAppAttempts() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 8192);
    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
    MockAM am = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
    int maxAppAttempts = rm.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    // The test only makes sense when retries are allowed.
    assertTrue(maxAppAttempts > 1);
    int numAttempt = 1;
    while (true) {
      // Fail the current attempt by completing its AM container.
      amNodeManager.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
      am.waitForState(RMAppAttemptState.FAILED);
      if (numAttempt == maxAppAttempts) {
        // Last allowed attempt failed: the whole application must fail.
        rm.waitForState(app1.getApplicationId(), RMAppState.FAILED);
        break;
      }
      // Otherwise the app is rescheduled and a fresh AM is launched.
      rm.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
      am = MockRM.launchAndRegisterAM(app1, rm, amNodeManager);
      numAttempt++;
    }
    assertEquals("incorrect number of attempts", maxAppAttempts,
        app1.getAppAttempts().values().size());
    testAppAttemptsHelper(app1.getApplicationId().toString(), app1, MediaType.APPLICATION_JSON);
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
// Verifies that GET /ws/v1/cluster/apps with an unknown value in the singular
// "state" query parameter is rejected with 400 BAD_REQUEST and a well-formed
// RemoteException JSON payload.
@Test public void testAppsQueryStateInvalid() throws JSONException, Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
rm.submitApp(CONTAINER_MB);
amNodeManager.nodeHeartbeat(true);
WebResource r=resource();
try {
r.path("ws").path("v1").path("cluster").path("apps").queryParam("state","INVALID_test").accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
fail("should have thrown exception on invalid state query");
}
catch ( UniformInterfaceException ue) {
// Jersey surfaces the non-2xx response as a UniformInterfaceException.
ClientResponse response=ue.getResponse();
assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus());
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
JSONObject msg=response.getEntity(JSONObject.class);
JSONObject exception=msg.getJSONObject("RemoteException");
// The RemoteException body carries exactly: message, exception, javaClassName.
assertEquals("incorrect number of elements",3,exception.length());
String message=exception.getString("message");
String type=exception.getString("exception");
String classname=exception.getString("javaClassName");
WebServicesTestUtils.checkStringContains("exception message","Invalid application-state INVALID_test",message);
WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type);
WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname);
}
finally {
// Stop the RM regardless of outcome so subsequent tests start clean.
rm.stop();
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies the plural "states" filter of GET /apps: a single state returns
// only matching apps, and a comma-separated "ACCEPTED,KILLED" list returns
// the union of both (array order not guaranteed).
@Test public void testAppsQueryStatesComma() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    rm.submitApp(CONTAINER_MB);
    RMApp killedApp = rm.submitApp(CONTAINER_MB);
    rm.killApp(killedApp.getApplicationId());
    amNodeManager.nodeHeartbeat(true);
    // Query 1: states=ACCEPTED -> only the live app.
    WebResource r = resource();
    MultivaluedMapImpl params = new MultivaluedMapImpl();
    params.add("states", YarnApplicationState.ACCEPTED.toString());
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject apps = json.getJSONObject("apps");
    assertEquals("incorrect number of elements", 1, apps.length());
    JSONArray array = apps.getJSONArray("app");
    assertEquals("incorrect number of elements", 1, array.length());
    assertEquals("state not equal to ACCEPTED", "ACCEPTED", array.getJSONObject(0).getString("state"));
    // Query 2: states=ACCEPTED,KILLED -> both apps, in either order.
    r = resource();
    params = new MultivaluedMapImpl();
    params.add("states", YarnApplicationState.ACCEPTED.toString() + "," + YarnApplicationState.KILLED.toString());
    response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParams(params).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    apps = json.getJSONObject("apps");
    assertEquals("incorrect number of elements", 1, apps.length());
    array = apps.getJSONArray("app");
    assertEquals("incorrect number of elements", 2, array.length());
    assertTrue("both app states of ACCEPTED and KILLED are not present",
        (array.getJSONObject(0).getString("state").equals("ACCEPTED")
            && array.getJSONObject(1).getString("state").equals("KILLED"))
        || (array.getJSONObject(0).getString("state").equals("KILLED")
            && array.getJSONObject(1).getString("state").equals("ACCEPTED")));
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
// Verifies that a finalStatus filter matching no application yields a JSON
// body whose "apps" member is null rather than an empty array.
@Test public void testAppsQueryFinalStatusNone() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    rm.submitApp(CONTAINER_MB);
    amNodeManager.nodeHeartbeat(true);
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("finalStatus", FinalApplicationStatus.KILLED.toString())
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies the XML rendering of GET /ws/v1/cluster/apps: one <apps> wrapper
// containing one <app>, whose contents match the submitted RMApp.
@Test public void testAppsXML() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
    amNodeManager.nodeHeartbeat(true);
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    // Parse the response body into a DOM so individual elements can be checked.
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList nodesApps = dom.getElementsByTagName("apps");
    assertEquals("incorrect number of elements", 1, nodesApps.getLength());
    NodeList nodes = dom.getElementsByTagName("app");
    assertEquals("incorrect number of elements", 1, nodes.getLength());
    verifyAppsXML(nodes, app1);
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that a startedTimeBegin-only filter returns just the one app
// submitted after the captured timestamp, excluding the two earlier apps.
@Test public void testAppsQueryStartBeginSome() throws Exception {
  rm.start();
  try {
    rm.registerNode("127.0.0.1:1234", 2048);
    rm.submitApp(CONTAINER_MB);
    rm.submitApp(CONTAINER_MB);
    long start = System.currentTimeMillis();
    Thread.sleep(1); // the third app starts strictly after 'start'
    rm.submitApp(CONTAINER_MB);
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("cluster").path("apps")
        .queryParam("startedTimeBegin", String.valueOf(start))
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject apps = json.getJSONObject("apps");
    assertEquals("incorrect number of elements", 1, apps.length());
    JSONArray array = apps.getJSONArray("app");
    assertEquals("incorrect number of elements", 1, array.length());
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
// Verifies that PUT /apps/{appid}/state rejects state transitions other than
// KILLED (e.g. FINISHED or garbage) with 400 BAD_REQUEST, across every
// combination of request content type and Accept media type. Without
// authentication the server must answer 401 UNAUTHORIZED instead.
@Test public void testSingleAppKillInvalidState() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
    MediaType[] contentTypes = {MediaType.APPLICATION_JSON_TYPE, MediaType.APPLICATION_XML_TYPE};
    String[] targetStates = {YarnApplicationState.FINISHED.toString(), "blah"};
    for (String mediaType : mediaTypes) {
      for (MediaType contentType : contentTypes) {
        for (String targetStateString : targetStates) {
          RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
          amNodeManager.nodeHeartbeat(true);
          ClientResponse response;
          AppState targetState = new AppState(targetStateString);
          Object entity;
          // JSON bodies must be pre-serialized; XML is marshalled by Jersey.
          if (contentType == MediaType.APPLICATION_JSON_TYPE) {
            entity = appStateToJSON(targetState);
          } else {
            entity = targetState;
          }
          response = this.constructWebResource("apps", app.getApplicationId().toString(), "state")
              .entity(entity, contentType).accept(mediaType).put(ClientResponse.class);
          if (!isAuthenticationEnabled()) {
            // Unauthenticated callers may not change app state at all.
            assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
            continue;
          }
          assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        }
      }
    }
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// End-to-end kill via the REST API: PUT /apps/{appid}/state with a KILLED
// target must answer 202 ACCEPTED (kill is asynchronous), expose a Location
// header pointing back at the state resource, and eventually report 200 OK
// with state KILLED when the PUT is retried. Exercised for every combination
// of request content type and Accept media type.
@Test(timeout=90000) public void testSingleAppKill() throws Exception {
rm.start();
MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048);
String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML};
MediaType[] contentTypes={MediaType.APPLICATION_JSON_TYPE,MediaType.APPLICATION_XML_TYPE};
for ( String mediaType : mediaTypes) {
for ( MediaType contentType : contentTypes) {
RMApp app=rm.submitApp(CONTAINER_MB,"",webserviceUserName);
amNodeManager.nodeHeartbeat(true);
ClientResponse response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).get(ClientResponse.class);
AppState targetState=new AppState(YarnApplicationState.KILLED.toString());
Object entity;
// JSON bodies must be pre-serialized; XML is marshalled by Jersey.
if (contentType == MediaType.APPLICATION_JSON_TYPE) {
entity=appStateToJSON(targetState);
}
else {
entity=targetState;
}
response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").entity(entity,contentType).accept(mediaType).put(ClientResponse.class);
if (!isAuthenticationEnabled()) {
// Unauthenticated callers may not change app state at all.
assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus());
continue;
}
// Kill is asynchronous: first PUT is acknowledged with 202.
assertEquals(Status.ACCEPTED,response.getClientResponseStatus());
if (mediaType == MediaType.APPLICATION_JSON) {
verifyAppStateJson(response,RMAppState.KILLING,RMAppState.ACCEPTED);
}
else {
verifyAppStateXML(response,RMAppState.KILLING,RMAppState.ACCEPTED);
}
// The Location header must point back at this app's state resource.
String locationHeaderValue=response.getHeaders().getFirst(HttpHeaders.LOCATION);
Client c=Client.create();
WebResource tmp=c.resource(locationHeaderValue);
if (isAuthenticationEnabled()) {
tmp=tmp.queryParam("user.name",webserviceUserName);
}
response=tmp.get(ClientResponse.class);
assertEquals(Status.OK,response.getClientResponseStatus());
assertTrue(locationHeaderValue.endsWith("/ws/v1/cluster/apps/" + app.getApplicationId().toString() + "/state"));
// Poll by re-issuing the PUT until the kill completes (bounded by the
// test's 90s timeout).
while (true) {
Thread.sleep(100);
response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).entity(entity,contentType).put(ClientResponse.class);
assertTrue((response.getClientResponseStatus() == Status.ACCEPTED) || (response.getClientResponseStatus() == Status.OK));
if (response.getClientResponseStatus() == Status.OK) {
// 200 OK means the app reached its terminal KILLED state.
assertEquals(RMAppState.KILLED,app.getState());
if (mediaType == MediaType.APPLICATION_JSON) {
verifyAppStateJson(response,RMAppState.KILLED);
}
else {
verifyAppStateXML(response,RMAppState.KILLED);
}
break;
}
}
}
}
rm.stop();
return;
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that GET /apps/{appid}/state reports ACCEPTED for a freshly
// submitted app, in both JSON and XML renderings.
@Test public void testSingleAppState() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
    for (String mediaType : mediaTypes) {
      RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
      amNodeManager.nodeHeartbeat(true);
      ClientResponse response = this.constructWebResource("apps",
          app.getApplicationId().toString(), "state").accept(mediaType).get(ClientResponse.class);
      assertEquals(Status.OK, response.getClientResponseStatus());
      if (mediaType == MediaType.APPLICATION_JSON) {
        verifyAppStateJson(response, RMAppState.ACCEPTED);
      } else if (mediaType == MediaType.APPLICATION_XML) {
        verifyAppStateXML(response, RMAppState.ACCEPTED);
      }
    }
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier
// Verifies that PUT /apps/{appid}/state with a well-formed-but-unknown id or
// a malformed id yields 404 NOT_FOUND (or 401 UNAUTHORIZED when
// authentication is disabled).
@Test public void testSingleAppKillInvalidId() throws Exception {
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    amNodeManager.nodeHeartbeat(true);
    String[] testAppIds = {"application_1391705042196_0001", "random_string"};
    for (String testAppId : testAppIds) {
      AppState info = new AppState("KILLED");
      ClientResponse response = this.constructWebResource("apps", testAppId, "state")
          .accept(MediaType.APPLICATION_XML)
          .entity(info, MediaType.APPLICATION_XML).put(ClientResponse.class);
      if (!isAuthenticationEnabled()) {
        // Unauthenticated callers may not change app state at all.
        assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
        continue;
      }
      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    }
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Verifies that POST /ws/v1/cluster/apps rejects malformed request bodies:
// a blank XML body and a JSON object that does not match the submission
// schema must both produce 400 BAD_REQUEST.
@Test public void testAppSubmitBadJsonAndXML() throws Exception {
  String urlPath = "apps";
  rm.start();
  try {
    MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
    amNodeManager.nodeHeartbeat(true);
    // Build a fully-populated, otherwise valid submission context; only the
    // request body sent below is deliberately broken.
    ApplicationSubmissionContextInfo appInfo = new ApplicationSubmissionContextInfo();
    appInfo.setApplicationName("test");
    appInfo.setPriority(3);
    appInfo.setMaxAppAttempts(2);
    appInfo.setQueue("testqueue");
    appInfo.setApplicationType("test-type");
    HashMap<String, LocalResourceInfo> lr = new HashMap<String, LocalResourceInfo>();
    LocalResourceInfo y = new LocalResourceInfo();
    y.setUrl(new URI("http://www.test.com/file.txt"));
    y.setSize(100);
    y.setTimestamp(System.currentTimeMillis());
    y.setType(LocalResourceType.FILE);
    y.setVisibility(LocalResourceVisibility.APPLICATION);
    lr.put("example", y);
    appInfo.getContainerLaunchContextInfo().setResources(lr);
    appInfo.getResource().setMemory(1024);
    appInfo.getResource().setvCores(1);
    // Case 1: effectively empty XML body.
    String body = " ";
    ClientResponse response = this.constructWebResource(urlPath)
        .accept(MediaType.APPLICATION_XML)
        .entity(body, MediaType.APPLICATION_XML).post(ClientResponse.class);
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    // Case 2: syntactically valid JSON that is not a submission context.
    body = "{\"a\" : \"b\"}";
    response = this.constructWebResource(urlPath)
        .accept(MediaType.APPLICATION_XML)
        .entity(body, MediaType.APPLICATION_JSON).post(ClientResponse.class);
    validateResponseStatus(response, Status.BAD_REQUEST);
  } finally {
    // Stop the RM even when an assertion fails, so later tests start clean.
    rm.stop();
  }
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Submits one app each for user1 and user2 into queue b1, then checks that the
// scheduler JSON lists both users under that queue with application counts and
// resource-usage fields present.
@Test public void testPerUserResourcesJSON() throws Exception {
  rm.start();
  try {
    rm.submitApp(10, "app1", "user1", null, "b1");
    rm.submitApp(20, "app2", "user2", null, "b1");
    ClientResponse reply = resource()
        .path("ws").path("v1").path("cluster").path("scheduler/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, reply.getType());
    JSONObject body = reply.getEntity(JSONObject.class);
    JSONObject schedInfo = body.getJSONObject("scheduler").getJSONObject("schedulerInfo");
    // Drill down to leaf queue b1 under parent queue b.
    JSONObject leafB1 = getSubQueue(getSubQueue(schedInfo, "b"), "b1");
    JSONArray userArray = leafB1.getJSONObject("users").getJSONArray("user");
    for (int idx = 0; idx < 2; ++idx) {
      JSONObject entry = userArray.getJSONObject(idx);
      boolean isUser1 = entry.getString("username").equals("user1");
      boolean isUser2 = entry.getString("username").equals("user2");
      assertTrue("User isn't user1 or user2", isUser1 || isUser2);
      // Presence checks: these throw if the fields are missing.
      entry.getInt("numActiveApplications");
      entry.getInt("numPendingApplications");
      checkResourcesUsed(entry);
    }
  } finally {
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
/** Fetches scheduler info via the trailing-slash URL and verifies the JSON payload. */
@Test public void testClusterSchedulerSlash() throws JSONException, Exception {
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("scheduler/")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  verifyClusterScheduler(response.getEntity(JSONObject.class));
}
InternalCallVerifier EqualityVerifier
/** GET /ws/v1/cluster/scheduler (no trailing slash) returns valid scheduler JSON. */
@Test public void testClusterScheduler() throws JSONException, Exception {
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("scheduler")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  verifyClusterScheduler(response.getEntity(JSONObject.class));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Requests the scheduler endpoint as XML and checks the document contains
 * exactly one scheduler and one schedulerInfo element before deep-verifying.
 */
@Test public void testClusterSchedulerXML() throws JSONException, Exception {
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("scheduler/")
      .accept(MediaType.APPLICATION_XML)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  // Parse the returned XML body into a DOM tree for element-level checks.
  DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  InputSource source = new InputSource(new StringReader(response.getEntity(String.class)));
  Document dom = builder.parse(source);
  NodeList scheduler = dom.getElementsByTagName("scheduler");
  assertEquals("incorrect number of elements", 1, scheduler.getLength());
  NodeList schedulerInfo = dom.getElementsByTagName("schedulerInfo");
  assertEquals("incorrect number of elements", 1, schedulerInfo.getLength());
  verifyClusterSchedulerXML(schedulerInfo);
}
InternalCallVerifier EqualityVerifier
/** With no Accept header, the scheduler endpoint must default to a JSON response. */
@Test public void testClusterSchedulerDefault() throws JSONException, Exception {
  ClientResponse response = resource()
      .path("ws").path("v1").path("cluster").path("scheduler")
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  verifyClusterScheduler(response.getEntity(JSONObject.class));
}
APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test per user resources and resourcesUsed elements in the web services XML.
 * Two apps are submitted by two users into queue "b1"; the XML scheduler view
 * must then list exactly those two users under b1 (with parseable app counts
 * and memory usage) and no users under any other queue, and every
 * resourcesUsed element anywhere in the document must hold numeric
 * memory/vCores values.
 * @throws Exception
 */
@Test public void testPerUserResourcesXML() throws Exception {
rm.start();
try {
// Two applications from two distinct users, both targeted at queue b1.
rm.submitApp(10,"app1","user1",null,"b1");
rm.submitApp(20,"app2","user2",null,"b1");
WebResource r=resource();
ClientResponse response=r.path("ws/v1/cluster/scheduler").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType());
String xml=response.getEntity(String.class);
// Parse the XML payload into a DOM tree for structural assertions.
DocumentBuilder db=DocumentBuilderFactory.newInstance().newDocumentBuilder();
InputSource is=new InputSource();
is.setCharacterStream(new StringReader(xml));
Document dom=db.parse(is);
// Every queue emits a <users> element; only b1 should have children.
NodeList allUsers=dom.getElementsByTagName("users");
for (int i=0; i < allUsers.getLength(); ++i) {
Node perUserResources=allUsers.item(i);
// The queue name lives on the sibling <queueName> of the parent queue node.
String queueName=getChildNodeByName(perUserResources.getParentNode(),"queueName").getTextContent();
if (queueName.equals("b1")) {
// Exactly the two submitting users are expected under b1.
assertEquals(2,perUserResources.getChildNodes().getLength());
NodeList users=perUserResources.getChildNodes();
for (int j=0; j < users.getLength(); ++j) {
Node user=users.item(j);
String username=getChildNodeByName(user,"username").getTextContent();
assertTrue(username.equals("user1") || username.equals("user2"));
// parseInt doubles as a "field exists and is numeric" assertion here.
Integer.parseInt(getChildNodeByName(getChildNodeByName(user,"resourcesUsed"),"memory").getTextContent());
Integer.parseInt(getChildNodeByName(user,"numActiveApplications").getTextContent());
Integer.parseInt(getChildNodeByName(user,"numPendingApplications").getTextContent());
}
}
else {
// Queues without submissions must report no per-user entries at all.
assertEquals(0,perUserResources.getChildNodes().getLength());
}
}
// Every resourcesUsed block in the document must carry numeric memory/vCores.
NodeList allResourcesUsed=dom.getElementsByTagName("resourcesUsed");
for (int i=0; i < allResourcesUsed.getLength(); ++i) {
Node resourcesUsed=allResourcesUsed.item(i);
Integer.parseInt(getChildNodeByName(resourcesUsed,"memory").getTextContent());
Integer.parseInt(getChildNodeByName(resourcesUsed,"vCores").getTextContent());
}
}
finally {
rm.stop();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * App submission without a delegation token must be rejected with
 * 401 UNAUTHORIZED; the same request carrying a valid token must succeed
 * and record the token owner ("client") as the application's user.
 */
@Test public void testDelegationTokenAuth() throws Exception {
  final String token = getDelegationToken("test");
  ApplicationSubmissionContextInfo app = new ApplicationSubmissionContextInfo();
  String appid = "application_123_0";
  app.setApplicationId(appid);
  String requestBody = getMarshalledAppInfo(app);
  // NOTE(review): RM address and port are hard-coded; assumes the default RM
  // web port 8088 — confirm against the test's RM configuration.
  URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  setupConn(conn, "POST", "application/xml", requestBody);
  // First attempt carries no token header and must be refused.
  try {
    conn.getInputStream();
    fail("we should not be here");
  } catch (IOException e) {
    assertEquals(Status.UNAUTHORIZED.getStatusCode(), conn.getResponseCode());
  }
  // Retry with the delegation token attached; this submission must succeed.
  conn = (HttpURLConnection) url.openConnection();
  conn.setRequestProperty(DelegationTokenHeader, token);
  setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);
  conn.getInputStream();
  boolean appExists =
      rm.getRMContext().getRMApps().containsKey(ConverterUtils.toApplicationId(appid));
  assertTrue(appExists);
  RMApp actualApp = rm.getRMContext().getRMApps().get(ConverterUtils.toApplicationId(appid));
  // The app owner must be the principal behind the delegation token.
  assertEquals("client", actualApp.getUser());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Delegation-token renewal over every JSON/XML media-type combination:
 * the token owner may not renew, the designated renewer may (and the
 * expiration must advance on each renewal), an unrelated principal is
 * FORBIDDEN, and malformed renewal payloads yield 400 BAD_REQUEST.
 */
@Test public void testRenewDelegationToken() throws Exception {
  // Register the HTTP logging filter once; it was previously added twice,
  // duplicating every request/response in the log output.
  client().addFilter(new LoggingFilter(System.out));
  rm.start();
  final String renewer = "client2";
  final DelegationToken dummyToken = new DelegationToken();
  dummyToken.setRenewer(renewer);
  String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
  for (final String mediaType : mediaTypes) {
    for (final String contentType : mediaTypes) {
      if (isKerberosAuth == false) {
        verifySimpleAuthRenew(mediaType, contentType);
        continue;
      }
      // The owner obtains a token but must not be allowed to renew it itself.
      final DelegationToken responseToken = KerberosTestUtils.doAsClient(new Callable(){
        @Override public DelegationToken call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dummyToken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          assertFalse(tok.getToken().isEmpty());
          String body = generateRenewTokenBody(mediaType, tok.getToken());
          response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, tok.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
          return tok;
        }
      }
      );
      // The designated renewer renews twice; expiration must advance each time.
      KerberosTestUtils.doAs(renewer, new Callable(){
        @Override public DelegationToken call() throws Exception {
          long oldExpirationTime = Time.now();
          assertValidRMToken(responseToken.getToken());
          String body = generateRenewTokenBody(mediaType, responseToken.getToken());
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, responseToken.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          String message = "Expiration time not as expected: old = " + oldExpirationTime + "; new = "+ tok.getNextExpirationTime();
          assertTrue(message, tok.getNextExpirationTime() > oldExpirationTime);
          oldExpirationTime = tok.getNextExpirationTime();
          // Sleep so the second renewal lands on a strictly later timestamp.
          Thread.sleep(1000);
          response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, responseToken.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          tok = getDelegationTokenFromResponse(response);
          message = "Expiration time not as expected: old = " + oldExpirationTime + "; new = "+ tok.getNextExpirationTime();
          assertTrue(message, tok.getNextExpirationTime() > oldExpirationTime);
          return tok;
        }
      }
      );
      // A third, unrelated principal must be refused.
      KerberosTestUtils.doAs("client3", new Callable(){
        @Override public DelegationToken call() throws Exception {
          String body = generateRenewTokenBody(mediaType, responseToken.getToken());
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration")
              .header(yarnTokenHeader, responseToken.getToken()).accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
          return null;
        }
      }
      );
      // Malformed renewal payloads are rejected as bad requests.
      KerberosTestUtils.doAsClient(new Callable(){
        @Override public Void call() throws Exception {
          String token = "TEST_TOKEN_STRING";
          String body = "";
          if (mediaType.equals(MediaType.APPLICATION_JSON)) {
            body = "{\"token\": \"" + token + "\" }";
          }
          else {
            body = "" + token + " ";
          }
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").path("expiration").accept(contentType)
              .entity(body, mediaType).post(ClientResponse.class);
          assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
          return null;
        }
      }
      );
    }
  }
  rm.stop();
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * Delegation-token cancellation over every JSON/XML media-type combination:
 * both the owner and the designated renewer may cancel, while an unrelated
 * principal gets FORBIDDEN and the token remains valid. Malformed cancel
 * requests are exercised via testCancelTokenBadRequests.
 */
@Test public void testCancelDelegationToken() throws Exception {
  rm.start();
  this.client().addFilter(new LoggingFilter(System.out));
  // Without Kerberos, only the simple-auth rejection path applies.
  if (isKerberosAuth == false) {
    verifySimpleAuthCancel();
    return;
  }
  final DelegationToken dtoken = new DelegationToken();
  String renewer = "client2";
  dtoken.setRenewer(renewer);
  String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
  for (final String mediaType : mediaTypes) {
    for (final String contentType : mediaTypes) {
      // The owner can cancel its own token.
      KerberosTestUtils.doAsClient(new Callable(){
        @Override public Void call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dtoken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").header(yarnTokenHeader, tok.getToken())
              .accept(contentType).delete(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          assertTokenCancelled(tok.getToken());
          return null;
        }
      }
      );
      // The renewer can cancel a token it did not create.
      final DelegationToken tmpToken = KerberosTestUtils.doAsClient(new Callable(){
        @Override public DelegationToken call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dtoken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          return tok;
        }
      }
      );
      KerberosTestUtils.doAs(renewer, new Callable(){
        @Override public Void call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").header(yarnTokenHeader, tmpToken.getToken())
              .accept(contentType).delete(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          assertTokenCancelled(tmpToken.getToken());
          return null;
        }
      }
      );
      // An unrelated principal must be refused, leaving the token usable.
      final DelegationToken tmpToken2 = KerberosTestUtils.doAsClient(new Callable(){
        @Override public DelegationToken call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").accept(contentType)
              .entity(dtoken, mediaType).post(ClientResponse.class);
          assertEquals(Status.OK, response.getClientResponseStatus());
          DelegationToken tok = getDelegationTokenFromResponse(response);
          return tok;
        }
      }
      );
      KerberosTestUtils.doAs("client3", new Callable(){
        @Override public Void call() throws Exception {
          ClientResponse response = resource().path("ws").path("v1").path("cluster")
              .path("delegation-token").header(yarnTokenHeader, tmpToken2.getToken())
              .accept(contentType).delete(ClientResponse.class);
          assertEquals(Status.FORBIDDEN, response.getClientResponseStatus());
          assertValidRMToken(tmpToken2.getToken());
          return null;
        }
      }
      );
      testCancelTokenBadRequests(mediaType, contentType);
    }
  }
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** Scheduler endpoint sanity check: JSON content type plus a verifiable payload. */
@Test public void testClusterScheduler() throws JSONException, Exception {
  WebResource cluster = resource().path("ws").path("v1").path("cluster");
  ClientResponse response =
      cluster.path("scheduler").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject payload = response.getEntity(JSONObject.class);
  verifyClusterScheduler(payload);
}
InternalCallVerifier EqualityVerifier
/** Same scheduler check, but through the trailing-slash form of the URL. */
@Test public void testClusterSchedulerSlash() throws JSONException, Exception {
  WebResource cluster = resource().path("ws").path("v1").path("cluster");
  ClientResponse response =
      cluster.path("scheduler/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject payload = response.getEntity(JSONObject.class);
  verifyClusterScheduler(payload);
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** Looking up an unknown node id must yield 404 with a RemoteException JSON body. */
@Test public void testNonexistNode() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  try {
    resource().path("ws").path("v1").path("cluster").path("nodes").path("node_invalid:99")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on non-existent nodeid");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject exception =
        response.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    verifyNonexistNodeException(exception.getString("message"),
        exception.getString("exception"), exception.getString("javaClassName"));
  } finally {
    rm.stop();
  }
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** An unknown state filter value must produce 400 with an IllegalArgumentException body. */
@Test public void testNodesQueryStateInvalid() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  try {
    resource().path("ws").path("v1").path("cluster").path("nodes")
        .queryParam("states", "BOGUSSTATE")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception querying invalid state");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject exception =
        response.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    WebServicesTestUtils.checkStringContains("exception message",
        "org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE",
        exception.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type",
        "IllegalArgumentException", exception.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "java.lang.IllegalArgumentException", exception.getString("javaClassName"));
  } finally {
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
/** Filtering on UNHEALTHY when no node is unhealthy yields a null "nodes" entry. */
@Test public void testNodesQueryHealthyFalse() throws JSONException, Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .queryParam("states", "UNHEALTHY")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** A single registered node fetched by id as XML contains exactly one <node> element. */
@Test public void testSingleNodesXML() throws JSONException, Exception {
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  ClientResponse response = resource().path("ws").path("v1").path("cluster")
      .path("nodes").path("h1:1234")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  // Parse the body and verify a single <node> element describing nm1.
  DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom =
      builder.parse(new InputSource(new StringReader(response.getEntity(String.class))));
  NodeList nodes = dom.getElementsByTagName("node");
  assertEquals("incorrect number of elements", 1, nodes.getLength());
  verifyNodesXML(nodes, nm1);
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** A lost node queried by id reports an empty HTTP address and its inactive state. */
@Test public void testSingleNodeQueryStateLost() throws JSONException, Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1234", 5120);
  rm.sendNodeStarted(nm1);
  rm.sendNodeStarted(nm2);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
  rm.sendNodeLost(nm1);
  rm.sendNodeLost(nm2);
  ClientResponse response = resource().path("ws").path("v1").path("cluster")
      .path("nodes").path("h2:1234")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject info = response.getEntity(JSONObject.class).getJSONObject("node");
  assertEquals("Incorrect Node Information.", "h2:1234", info.get("id").toString());
  // Lost nodes move to the inactive map, keyed by bare host name.
  RMNode rmNode = rm.getRMContext().getInactiveRMNodes().get("h2");
  WebServicesTestUtils.checkStringMatch("nodeHTTPAddress", "", info.getString("nodeHTTPAddress"));
  WebServicesTestUtils.checkStringMatch("state", rmNode.getState().toString(), info.getString("state"));
}
InternalCallVerifier EqualityVerifier
/** Querying with every NodeState listed returns running, new and lost nodes alike. */
@Test public void testQueryAll() throws Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  MockNM nm3 = rm.registerNode("h3:1236", 5122);
  rm.sendNodeStarted(nm1);
  rm.sendNodeStarted(nm3);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  rm.sendNodeLost(nm3);
  // Build a comma-separated filter covering every possible node state.
  String allStates = Joiner.on(',').join(EnumSet.allOf(NodeState.class));
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .queryParam("states", allStates)
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject nodes = response.getEntity(JSONObject.class).getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  assertEquals("incorrect number of elements", 3, nodes.getJSONArray("node").length());
}
InternalCallVerifier EqualityVerifier
/** Filtering on LOST returns both lost nodes, each with empty address and LOST state. */
@Test public void testNodesQueryStateLost() throws JSONException, Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1234", 5120);
  rm.sendNodeStarted(nm1);
  rm.sendNodeStarted(nm2);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
  rm.sendNodeLost(nm1);
  rm.sendNodeLost(nm2);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .queryParam("states", NodeState.LOST.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject nodes = response.getEntity(JSONObject.class).getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 2, nodeArray.length());
  for (int i = 0; i < nodeArray.length(); ++i) {
    JSONObject info = nodeArray.getJSONObject(i);
    // Lost nodes live in the inactive map, keyed by bare host name.
    String host = info.get("id").toString().split(":")[0];
    RMNode rmNode = rm.getRMContext().getInactiveRMNodes().get(host);
    WebServicesTestUtils.checkStringMatch("nodeHTTPAddress", "", info.getString("nodeHTTPAddress"));
    WebServicesTestUtils.checkStringMatch("state", rmNode.getState().toString(), info.getString("state"));
  }
}
InternalCallVerifier EqualityVerifier
/** Filtering on NEW returns only the not-yet-started node, which is then verified. */
@Test public void testNodesQueryNew() throws JSONException, Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .queryParam("states", NodeState.NEW.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 1, nodeArray.length());
  verifyNodeInfo(nodeArray.getJSONObject(0), nm2);
}
InternalCallVerifier EqualityVerifier
/** No node is DECOMMISSIONED, so that state filter must return a null "nodes" entry. */
@Test public void testNodesQueryStateNone() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .queryParam("states", NodeState.DECOMMISSIONED.toString())
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes"));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** With two registered nodes the XML listing holds one <nodes> and two <node> elements. */
@Test public void testNodes2XML() throws JSONException, Exception {
  rm.start();
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom =
      builder.parse(new InputSource(new StringReader(response.getEntity(String.class))));
  assertEquals("incorrect number of elements", 1, dom.getElementsByTagName("nodes").getLength());
  assertEquals("incorrect number of elements", 2, dom.getElementsByTagName("node").getLength());
  rm.stop();
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** Unknown node id requested as XML yields 404 with a RemoteException XML body. */
@Test public void testNonexistNodeXML() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  try {
    resource().path("ws").path("v1").path("cluster").path("nodes").path("node_invalid:99")
        .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
    fail("should have thrown exception on non-existent nodeid");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String msg = response.getEntity(String.class);
    System.out.println(msg);
    // Parse the error body and pull the exception fields from <RemoteException>.
    DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document dom = builder.parse(new InputSource(new StringReader(msg)));
    Element element = (Element) dom.getElementsByTagName("RemoteException").item(0);
    verifyNonexistNodeException(
        WebServicesTestUtils.getXmlString(element, "message"),
        WebServicesTestUtils.getXmlString(element, "exception"),
        WebServicesTestUtils.getXmlString(element, "javaClassName"));
  } finally {
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
/** The default node listing (no state filter) must include unhealthy nodes too. */
@Test public void testNodesDefaultWithUnHealthyNode() throws JSONException, Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  // Bring a third node up, then force it UNHEALTHY via a failing health report.
  MockNM nm3 = rm.registerNode("h3:1236", 5122);
  rm.NMwaitForState(nm3.getNodeId(), NodeState.NEW);
  rm.sendNodeStarted(nm3);
  rm.NMwaitForState(nm3.getNodeId(), NodeState.RUNNING);
  RMNodeImpl node = (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm3.getNodeId());
  NodeHealthStatus nodeHealth =
      NodeHealthStatus.newInstance(false, "test health report", System.currentTimeMillis());
  node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth, new ArrayList(), null, null));
  rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY);
  // All three nodes (running, new, unhealthy) must appear in the default view.
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  JSONArray nodeArray = nodes.getJSONArray("node");
  assertEquals("incorrect number of elements", 3, nodeArray.length());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** A node id without host:port form must be rejected with 400 and an IAE body. */
@Test public void testInvalidNode() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  try {
    resource().path("ws").path("v1").path("cluster").path("nodes").path("node_invalid_foo")
        .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
    fail("should have thrown exception on non-existent nodeid");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject exception =
        response.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    WebServicesTestUtils.checkStringMatch("exception message",
        "Invalid NodeId \\[node_invalid_foo\\]. Expected host:port",
        exception.getString("message"));
    WebServicesTestUtils.checkStringMatch("exception type",
        "IllegalArgumentException", exception.getString("exception"));
    WebServicesTestUtils.checkStringMatch("exception classname",
        "java.lang.IllegalArgumentException", exception.getString("javaClassName"));
  } finally {
    rm.stop();
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/** One registered node: XML listing has one <nodes> wrapper and one <node> element. */
@Test public void testNodesXML() throws JSONException, Exception {
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
  DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document dom =
      builder.parse(new InputSource(new StringReader(response.getEntity(String.class))));
  assertEquals("incorrect number of elements", 1, dom.getElementsByTagName("nodes").getLength());
  NodeList nodes = dom.getElementsByTagName("node");
  assertEquals("incorrect number of elements", 1, nodes.getLength());
  verifyNodesXML(nodes, nm1);
  rm.stop();
}
InternalCallVerifier EqualityVerifier
/** Filtering on "running" (lowercase accepted) returns only the started node. */
@Test public void testNodesQueryRunning() throws JSONException, Exception {
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:1235", 5121);
  rm.sendNodeStarted(nm1);
  rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
  rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
  ClientResponse response = resource().path("ws").path("v1").path("cluster").path("nodes")
      .queryParam("states", "running")
      .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);
  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject nodes = json.getJSONObject("nodes");
  assertEquals("incorrect number of elements", 1, nodes.length());
  assertEquals("incorrect number of elements", 1, nodes.getJSONArray("node").length());
}
UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier
/** Unknown node id with no Accept header still yields 404 with a JSON error body. */
@Test public void testNonexistNodeDefault() throws JSONException, Exception {
  rm.registerNode("h1:1234", 5120);
  rm.registerNode("h2:1235", 5121);
  try {
    resource().path("ws").path("v1").path("cluster").path("nodes").path("node_invalid:99")
        .get(JSONObject.class);
    fail("should have thrown exception on non-existent nodeid");
  } catch (UniformInterfaceException ue) {
    ClientResponse response = ue.getResponse();
    assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject exception =
        response.getEntity(JSONObject.class).getJSONObject("RemoteException");
    assertEquals("incorrect number of elements", 3, exception.length());
    verifyNonexistNodeException(exception.getString("message"),
        exception.getString("exception"), exception.getString("javaClassName"));
  } finally {
    rm.stop();
  }
}
InternalCallVerifier EqualityVerifier
/** Start-time read/write cache sizes default to 10000 and honor their config overrides. */
@Test public void testCacheSizes() {
  Configuration defaults = new Configuration();
  assertEquals(10000, LeveldbTimelineStore.getStartTimeReadCacheSize(defaults));
  assertEquals(10000, LeveldbTimelineStore.getStartTimeWriteCacheSize(defaults));
  defaults.setInt(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE, 10001);
  assertEquals(10001, LeveldbTimelineStore.getStartTimeReadCacheSize(defaults));
  // A fresh configuration isolates the write-cache override from the read one.
  Configuration writeConf = new Configuration();
  writeConf.setInt(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE, 10002);
  assertEquals(10002, LeveldbTimelineStore.getStartTimeWriteCacheSize(writeConf));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
/** The leveldb store directory must exist and carry the store's umask permission. */
@Test public void testRootDirPermission() throws IOException {
  FileSystem localFs = FileSystem.getLocal(new YarnConfiguration());
  Path dbPath = new Path(fsPath.getAbsolutePath(), LeveldbTimelineStore.FILENAME);
  FileStatus status = localFs.getFileStatus(dbPath);
  assertNotNull(status);
  assertEquals(LeveldbTimelineStore.LEVELDB_DIR_UMASK, status.getPermission());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Exercises LeveldbTimelineStore.discardOldEntities() against primary-filter
// indexes: entities at or before the retention cutoff must disappear from the
// main listing and from every primary-filter view.
@Test public void testDeleteEntitiesPrimaryFilters() throws IOException, InterruptedException {
// Insert an extra entity (start time 789) owned by "otheruser".
Map primaryFilter=Collections.singletonMap("user",Collections.singleton((Object)"otheruser"));
TimelineEntities atsEntities=new TimelineEntities();
atsEntities.setEntities(Collections.singletonList(createEntity(entityId1b,entityType1,789l,Collections.singletonList(ev2),null,primaryFilter,null)));
TimelinePutResponse response=store.put(atsEntities);
assertEquals(0,response.getErrors().size());
NameValuePair pfPair=new NameValuePair("user","otheruser");
// The new entity is visible through its own primary filter...
List entities=getEntitiesWithPrimaryFilter("type_1",pfPair);
assertEquals(1,entities.size());
verifyEntityInfo(entityId1b,entityType1,Collections.singletonList(ev2),EMPTY_REL_ENTITIES,primaryFilter,EMPTY_MAP,entities.get(0));
// ...and the pre-existing user filter still matches both entities.
entities=getEntitiesWithPrimaryFilter("type_1",userFilter);
assertEquals(2,entities.size());
verifyEntityInfo(entityId1,entityType1,events1,EMPTY_REL_ENTITIES,primaryFilters,otherInfo,entities.get(0));
verifyEntityInfo(entityId1b,entityType1,events1,EMPTY_REL_ENTITIES,primaryFilters,otherInfo,entities.get(1));
// A cutoff earlier than every start time discards nothing.
((LeveldbTimelineStore)store).discardOldEntities(-123l);
assertEquals(1,getEntitiesWithPrimaryFilter("type_1",pfPair).size());
assertEquals(2,getEntitiesWithPrimaryFilter("type_1",userFilter).size());
// A cutoff covering all start times removes everything, including the
// per-type listing and the per-filter index entries.
((LeveldbTimelineStore)store).discardOldEntities(123l);
assertEquals(0,getEntities("type_1").size());
assertEquals(0,getEntities("type_2").size());
assertEquals(0,((LeveldbTimelineStore)store).getEntityTypes().size());
assertEquals(0,getEntitiesWithPrimaryFilter("type_1",pfPair).size());
assertEquals(0,getEntitiesWithPrimaryFilter("type_1",userFilter).size());
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
// Verifies leveldb schema-version handling: a minor-version bump is
// compatible and is reset on restart, while a major-version bump is
// incompatible and makes the service restart fail with ServiceStateException.
@Test public void testCheckVersion() throws IOException {
LeveldbTimelineStore dbStore=(LeveldbTimelineStore)store;
Version defaultVersion=dbStore.getCurrentVersion();
Assert.assertEquals(defaultVersion,dbStore.loadVersion());
// Same major version, higher minor version => still compatible.
Version compatibleVersion=Version.newInstance(defaultVersion.getMajorVersion(),defaultVersion.getMinorVersion() + 2);
dbStore.storeVersion(compatibleVersion);
Assert.assertEquals(compatibleVersion,dbStore.loadVersion());
restartTimelineStore();
dbStore=(LeveldbTimelineStore)store;
// After a restart the stored version is back at the current default.
Assert.assertEquals(defaultVersion,dbStore.loadVersion());
// A higher major version is incompatible and must block startup.
Version incompatibleVersion=Version.newInstance(defaultVersion.getMajorVersion() + 1,defaultVersion.getMinorVersion());
dbStore.storeVersion(incompatibleVersion);
try {
restartTimelineStore();
Assert.fail("Incompatible version, should expect fail here.");
}
catch ( ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for timeline store"));
}
}
InternalCallVerifier BooleanVerifier
// With ACLs disabled, checkAccess must succeed for any user regardless of
// the entity's owner filter.
@Test public void testYarnACLsNotEnabled() throws Exception {
  Configuration noAclConf = new YarnConfiguration();
  noAclConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, false);
  TimelineACLsManager aclsManager = new TimelineACLsManager(noAclConf);
  TimelineEntity ownedEntity = new TimelineEntity();
  ownedEntity.addPrimaryFilter(TimelineStore.SystemFilter.ENTITY_OWNER.toString(), "owner");
  UserGroupInformation anyUser = UserGroupInformation.createRemoteUser("user");
  Assert.assertTrue("Always true when ACLs are not enabled",
      aclsManager.checkAccess(anyUser, ownedEntity));
}
InternalCallVerifier BooleanVerifier
// With ACLs enabled, only the entity's owner and members of the admin ACL
// may read the entity; everyone else is denied.
@Test public void testYarnACLsEnabled() throws Exception {
  Configuration aclConf = new YarnConfiguration();
  aclConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
  aclConf.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
  TimelineACLsManager aclsManager = new TimelineACLsManager(aclConf);
  TimelineEntity ownedEntity = new TimelineEntity();
  ownedEntity.addPrimaryFilter(TimelineStore.SystemFilter.ENTITY_OWNER.toString(), "owner");
  Assert.assertTrue("Owner should be allowed to access",
      aclsManager.checkAccess(UserGroupInformation.createRemoteUser("owner"), ownedEntity));
  Assert.assertFalse("Other shouldn't be allowed to access",
      aclsManager.checkAccess(UserGroupInformation.createRemoteUser("other"), ownedEntity));
  Assert.assertTrue("Admin should be allowed to access",
      aclsManager.checkAccess(UserGroupInformation.createRemoteUser("admin"), ownedEntity));
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Only configuration keys under the CrossOriginFilterInitializer prefix are
// forwarded as filter parameters; out-of-scope keys are dropped.
@Test public void testGetFilterParameters(){
  Configuration conf = new Configuration();
  String rootKey = CrossOriginFilterInitializer.PREFIX + "rootparam";
  String nestedKey = CrossOriginFilterInitializer.PREFIX + "nested.param";
  conf.set(rootKey, "rootvalue");
  conf.set(nestedKey, "nestedvalue");
  conf.set("outofscopeparam", "outofscopevalue");
  Map params = CrossOriginFilterInitializer.getFilterParameters(conf);
  Assert.assertEquals("Could not find filter parameter", "rootvalue", params.get(rootKey));
  Assert.assertEquals("Could not find filter parameter", "nestedvalue", params.get(nestedKey));
  Assert.assertNull("Found unexpected value in filter parameters", params.get("outofscopeparam"));
}
InternalCallVerifier EqualityVerifier
// Quoting forces "123abc" to be parsed as a string filter value, so the
// seeded entities should match.
@Test public void testPrimaryFilterNumericStringWithQuotes(){
  WebResource timeline = resource().path("ws").path("v1").path("timeline");
  ClientResponse resp = timeline.path("type_1")
      .queryParam("primaryFilter", "other:\"123abc\"")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  verifyEntities(resp.getEntity(TimelineEntities.class));
}
InternalCallVerifier EqualityVerifier
// Without quotes the same value does not match any stored entity, so the
// result set is empty.
@Test public void testPrimaryFilterNumericString(){
  WebResource timeline = resource().path("ws").path("v1").path("timeline");
  ClientResponse resp = timeline.path("type_1")
      .queryParam("primaryFilter", "other:123abc")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  assertEquals(0, resp.getEntity(TimelineEntities.class).getEntities().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// The fromTs paging parameter bounds results by insertion time: a timestamp
// before any insert yields nothing, the current time yields both entities.
@Test public void testFromTs() throws Exception {
  WebResource typeResource = resource().path("ws").path("v1").path("timeline").path("type_1");
  ClientResponse resp = typeResource
      .queryParam("fromTs", Long.toString(beforeTime))
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  assertEquals(0, resp.getEntity(TimelineEntities.class).getEntities().size());
  resp = typeResource
      .queryParam("fromTs", Long.toString(System.currentTimeMillis()))
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  assertEquals(2, resp.getEntity(TimelineEntities.class).getEntities().size());
}
InternalCallVerifier EqualityVerifier
// With admin ACLs installed, listing entities of a type returns only the
// entities the requesting user may see: "other" gets its own entity but not
// the one posted by "tester".
@Test public void testGetEntitiesWithYarnACLsEnabled(){
// Swap in the ACL-enforcing manager; restored in the finally block.
AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
// "tester" posts the first entity of "test type 4".
TimelineEntities entities=new TimelineEntities();
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("test id 4");
entity.setEntityType("test type 4");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
// "other" posts a second entity of the same type.
entities=new TimelineEntities();
entity=new TimelineEntity();
entity.setEntityId("test id 5");
entity.setEntityType("test type 4");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
r=resource();
response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
// Listing as "other" must return only the entity "other" owns.
response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").path("test type 4").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entities=response.getEntity(TimelineEntities.class);
assertEquals(1,entities.getEntities().size());
assertEquals("test type 4",entities.getEntities().get(0).getEntityType());
assertEquals("test id 5",entities.getEntities().get(0).getEntityId());
}
finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
InternalCallVerifier EqualityVerifier
// A primary-filter value beyond Integer range must be parsed as a long and
// still match the seeded entities.
@Test public void testPrimaryFilterLong(){
  long beyondIntRange = (long) Integer.MAX_VALUE + 1l;
  WebResource timeline = resource().path("ws").path("v1").path("timeline");
  ClientResponse resp = timeline.path("type_1")
      .queryParam("primaryFilter", "long:" + Long.toString(beyondIntRange))
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  verifyEntities(resp.getEntity(TimelineEntities.class));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Fetching events for entity id_1 must return a single per-entity bucket
// holding both seeded events, newest (456/end_event) first.
@Test public void testGetEvents() throws Exception {
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").path("type_1").path("events").queryParam("entityId","id_1").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelineEvents events=response.getEntity(TimelineEvents.class);
Assert.assertNotNull(events);
// One bucket for the single requested entity id.
Assert.assertEquals(1,events.getAllEvents().size());
TimelineEvents.EventsOfOneEntity partEvents=events.getAllEvents().get(0);
Assert.assertEquals(2,partEvents.getEvents().size());
// First event: the later end_event at timestamp 456 with one info entry.
TimelineEvent event1=partEvents.getEvents().get(0);
Assert.assertEquals(456l,event1.getTimestamp());
Assert.assertEquals("end_event",event1.getEventType());
Assert.assertEquals(1,event1.getEventInfo().size());
// Second event: the earlier start_event at timestamp 123 with no info.
TimelineEvent event2=partEvents.getEvents().get(1);
Assert.assertEquals(123l,event2.getTimestamp());
Assert.assertEquals("start_event",event2.getEventType());
Assert.assertEquals(0,event2.getEventInfo().size());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With ACLs enforced, the first poster becomes the entity owner; a later
// post of the same entity by a different user is rejected with ACCESS_DENIED.
@Test public void testPostEntitiesWithYarnACLsEnabled() throws Exception {
// Swap in the ACL-enforcing manager; restored in the finally block.
AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineEntities entities=new TimelineEntities();
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("test id 2");
entity.setEntityType("test type 2");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r=resource();
// First post as "tester" succeeds and establishes ownership.
ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelinePutResponse putResponse=response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(0,putResponse.getErrors().size());
// Re-posting the same entity as "other" must produce an ACCESS_DENIED error.
response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
putResponse=response.getEntity(TimelinePutResponse.class);
Assert.assertNotNull(putResponse);
Assert.assertEquals(1,putResponse.getErrors().size());
Assert.assertEquals(TimelinePutResponse.TimelinePutError.ACCESS_DENIED,putResponse.getErrors().get(0).getErrorCode());
}
finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
InternalCallVerifier EqualityVerifier
// A string-valued primary filter (user:username) must match the seeded
// type_1 entities.
@Test public void testPrimaryFilterString(){
  WebResource timeline = resource().path("ws").path("v1").path("timeline");
  ClientResponse resp = timeline.path("type_1")
      .queryParam("primaryFilter", "user:username")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  verifyEntities(resp.getEntity(TimelineEntities.class));
}
InternalCallVerifier EqualityVerifier
// Listing type_1 without any filters returns the seeded entities.
@Test public void testGetEntities() throws Exception {
  WebResource timeline = resource().path("ws").path("v1").path("timeline");
  ClientResponse resp = timeline.path("type_1")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  verifyEntities(resp.getEntity(TimelineEntities.class));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With fields=events,otherinfo only those sections are populated; primary
// filters are omitted (empty) while identity and start time always come back.
@Test public void testGetEntityFields1() throws Exception {
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").path("type_1").path("id_1").queryParam("fields","events,otherinfo").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelineEntity entity=response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1",entity.getEntityId());
Assert.assertEquals("type_1",entity.getEntityType());
Assert.assertEquals(123l,entity.getStartTime().longValue());
// Requested sections are filled; unrequested primaryfilters stays empty.
Assert.assertEquals(2,entity.getEvents().size());
Assert.assertEquals(0,entity.getPrimaryFilters().size());
Assert.assertEquals(4,entity.getOtherInfo().size());
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
// Posting an entity that carries the reserved system primary filter
// (ENTITY_OWNER) must be rejected with a SYSTEM_FILTER_CONFLICT put error.
@Test public void testPostEntitiesWithPrimaryFilter() throws Exception {
  TimelineEntities entities = new TimelineEntities();
  TimelineEntity entity = new TimelineEntity();
  Map filters = new HashMap();
  // ENTITY_OWNER is system-managed; clients may not set it themselves.
  filters.put(TimelineStore.SystemFilter.ENTITY_OWNER.toString(), new HashSet());
  entity.setPrimaryFilters(filters);
  entity.setEntityId("test id 6");
  entity.setEntityType("test type 6");
  entity.setStartTime(System.currentTimeMillis());
  entities.addEntity(entity);
  WebResource r = resource();
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .queryParam("user.name", "tester")
      .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  // Fixed misspelled local (putResposne -> putResponse) and read the error
  // code straight off the typed getErrors() list instead of a raw List local.
  TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
  Assert.assertEquals(1, putResponse.getErrors().size());
  Assert.assertEquals(TimelinePutResponse.TimelinePutError.SYSTEM_FILTER_CONFLICT,
      putResponse.getErrors().get(0).getErrorCode());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With ACLs enforced, the owner can fetch the entity (with the system
// ENTITY_OWNER filter stripped from every field view), while a non-owner
// gets 404 NOT_FOUND rather than a leak of the entity's existence.
@Test public void testGetEntityWithYarnACLsEnabled() throws Exception {
// Swap in the ACL-enforcing manager; restored in the finally block.
AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
TimelineEntities entities=new TimelineEntities();
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("test id 3");
entity.setEntityType("test type 3");
entity.setStartTime(System.currentTimeMillis());
entities.addEntity(entity);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
// Default field view: owner filter must not be exposed.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entity=response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// fields=relatedentities view: owner filter still hidden.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","relatedentities").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entity=response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// fields=primaryfilters view: even here the system owner filter is hidden.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","primaryfilters").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
entity=response.getEntity(TimelineEntity.class);
Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString()));
// A different user gets NOT_FOUND for the same entity.
response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
assertEquals(ClientResponse.Status.NOT_FOUND,response.getClientResponseStatus());
}
finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// With fields=lasteventonly,primaryfilters,relatedentities only the most
// recent event and the primary filters are returned; otherinfo stays empty.
@Test public void testGetEntityFields2() throws Exception {
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").path("type_1").path("id_1").queryParam("fields","lasteventonly," + "primaryfilters,relatedentities").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelineEntity entity=response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1",entity.getEntityId());
Assert.assertEquals("type_1",entity.getEntityType());
Assert.assertEquals(123l,entity.getStartTime().longValue());
// lasteventonly trims the event list to a single entry.
Assert.assertEquals(1,entity.getEvents().size());
Assert.assertEquals(4,entity.getPrimaryFilters().size());
// otherinfo was not requested, so it must be empty.
Assert.assertEquals(0,entity.getOtherInfo().size());
}
InternalCallVerifier EqualityVerifier
// The fromId paging parameter starts the listing at the given entity id:
// starting at id_2 skips one entity, starting at id_1 returns both.
@Test public void testFromId() throws Exception {
  WebResource typeResource = resource().path("ws").path("v1").path("timeline").path("type_1");
  ClientResponse resp = typeResource
      .queryParam("fromId", "id_2")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  assertEquals(1, resp.getEntity(TimelineEntities.class).getEntities().size());
  resp = typeResource
      .queryParam("fromId", "id_1")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  assertEquals(2, resp.getEntity(TimelineEntities.class).getEntities().size());
}
InternalCallVerifier EqualityVerifier
// A comma-separated pair of secondary filters (string and integer valued)
// must match the seeded type_1 entities.
@Test public void testSecondaryFilters(){
  String filterSpec = "user:username,appname:" + Integer.toString(Integer.MAX_VALUE);
  ClientResponse resp = resource().path("ws").path("v1").path("timeline").path("type_1")
      .queryParam("secondaryFilter", filterSpec)
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  verifyEntities(resp.getEntity(TimelineEntities.class));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// The timeline root resource answers with its AboutInfo descriptor.
@Test public void testAbout() throws Exception {
  ClientResponse resp = resource().path("ws").path("v1").path("timeline")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  TimelineWebServices.AboutInfo info = resp.getEntity(TimelineWebServices.AboutInfo.class);
  Assert.assertNotNull(info);
  Assert.assertEquals("Timeline API", info.getAbout());
}
APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// An anonymous POST to the timeline service is FORBIDDEN; an authenticated
// POST is accepted and the stored entity can be read back by type and id.
@Test public void testPostEntities() throws Exception {
  TimelineEntities entities = new TimelineEntities();
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityId("test id 1");
  entity.setEntityType("test type 1");
  entity.setStartTime(System.currentTimeMillis());
  entities.addEntity(entity);
  WebResource r = resource();
  // Without user.name the request must be rejected.
  ClientResponse response = r.path("ws").path("v1").path("timeline")
      .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  assertEquals(ClientResponse.Status.FORBIDDEN, response.getClientResponseStatus());
  // With an identity the put succeeds with no errors.
  response = r.path("ws").path("v1").path("timeline").queryParam("user.name", "tester")
      .accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON)
      .post(ClientResponse.class, entities);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  // Fixed misspelled local: putResposne -> putResponse.
  TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
  Assert.assertNotNull(putResponse);
  Assert.assertEquals(0, putResponse.getErrors().size());
  // The stored entity is retrievable via GET /{type}/{id}.
  response = r.path("ws").path("v1").path("timeline").path("test type 1").path("test id 1")
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  entity = response.getEntity(TimelineEntity.class);
  Assert.assertNotNull(entity);
  Assert.assertEquals("test id 1", entity.getEntityId());
  Assert.assertEquals("test type 1", entity.getEntityType());
}
InternalCallVerifier EqualityVerifier
// With ACLs enforced, the events listing for a type only includes events of
// entities the requesting user may see: "other" sees its own entity's events
// but not those of the entity owned by "tester".
@Test public void testGetEventsWithYarnACLsEnabled(){
// Swap in the ACL-enforcing manager; restored in the finally block.
AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager);
try {
// "tester" posts "test id 5" with one event.
TimelineEntities entities=new TimelineEntities();
TimelineEntity entity=new TimelineEntity();
entity.setEntityId("test id 5");
entity.setEntityType("test type 5");
entity.setStartTime(System.currentTimeMillis());
TimelineEvent event=new TimelineEvent();
event.setEventType("event type 1");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);
entities.addEntity(entity);
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
// "other" posts "test id 6" with its own event.
entities=new TimelineEntities();
entity=new TimelineEntity();
entity.setEntityId("test id 6");
entity.setEntityType("test type 5");
entity.setStartTime(System.currentTimeMillis());
event=new TimelineEvent();
event.setEventType("event type 2");
event.setTimestamp(System.currentTimeMillis());
entity.addEvent(event);
entities.addEntity(entity);
r=resource();
response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities);
// Requesting events for both ids as "other" yields only its own entity.
response=r.path("ws").path("v1").path("timeline").path("test type 5").path("events").queryParam("user.name","other").queryParam("entityId","test id 5,test id 6").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelineEvents events=response.getEntity(TimelineEvents.class);
assertEquals(1,events.getAllEvents().size());
assertEquals("test id 6",events.getAllEvents().get(0).getEntityId());
}
finally {
timelineACLsManager.setAdminACLsManager(oldAdminACLsManager);
}
}
InternalCallVerifier EqualityVerifier
// An integer-valued primary filter (appname:Integer.MAX_VALUE) must match
// the seeded type_1 entities.
@Test public void testPrimaryFilterInteger(){
  String filterSpec = "appname:" + Integer.toString(Integer.MAX_VALUE);
  ClientResponse resp = resource().path("ws").path("v1").path("timeline").path("type_1")
      .queryParam("primaryFilter", filterSpec)
      .accept(MediaType.APPLICATION_JSON)
      .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
  verifyEntities(resp.getEntity(TimelineEntities.class));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
// Fetching entity id_1 with no field selector returns the fully populated
// entity: identity, start time, both events, all filters and other info.
@Test public void testGetEntity() throws Exception {
WebResource r=resource();
ClientResponse response=r.path("ws").path("v1").path("timeline").path("type_1").path("id_1").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType());
TimelineEntity entity=response.getEntity(TimelineEntity.class);
Assert.assertNotNull(entity);
Assert.assertEquals("id_1",entity.getEntityId());
Assert.assertEquals("type_1",entity.getEntityType());
Assert.assertEquals(123l,entity.getStartTime().longValue());
// All sections are present when no fields parameter is supplied.
Assert.assertEquals(2,entity.getEvents().size());
Assert.assertEquals(4,entity.getPrimaryFilters().size());
Assert.assertEquals(4,entity.getOtherInfo().size());
}
InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier
// End-to-end put through the timeline client: the entity round-trips into
// the backing store and the client's captured response URI uses https.
@Test public void testPutEntities() throws Exception {
TestTimelineClient client=new TestTimelineClient();
try {
client.init(conf);
client.start();
// Build an entity with a single event to publish.
TimelineEntity expectedEntity=new TimelineEntity();
expectedEntity.setEntityType("test entity type");
expectedEntity.setEntityId("test entity id");
TimelineEvent event=new TimelineEvent();
event.setEventType("test event type");
event.setTimestamp(0L);
expectedEntity.addEvent(event);
TimelinePutResponse response=client.putEntities(expectedEntity);
Assert.assertEquals(0,response.getErrors().size());
// The client must have talked to the service over https.
Assert.assertTrue(client.resp.toString().contains("https"));
// The entity must now be readable straight from the store.
TimelineEntity actualEntity=store.getEntity(expectedEntity.getEntityId(),expectedEntity.getEntityType(),EnumSet.allOf(Field.class));
Assert.assertNotNull(actualEntity);
Assert.assertEquals(expectedEntity.getEntityId(),actualEntity.getEntityId());
Assert.assertEquals(expectedEntity.getEntityType(),actualEntity.getEntityType());
}
finally {
// Tear the client down even if an assertion failed.
client.stop();
client.close();
}
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier PublicFieldVerifier HybridVerifier
// Every DBIterator method wrapped by LeveldbIterator must rethrow a raw
// RuntimeException as a DBException without double-wrapping an existing
// DBException, and close() must surface failures as IOException.
@Test public void testExceptionHandling() throws Exception {
  // Proxy that makes every DBIterator call fail with a RuntimeException.
  InvocationHandler rtExcHandler = new InvocationHandler(){
    @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
      throw new RuntimeException("forced runtime error");
    }
  };
  DBIterator dbiter = (DBIterator) Proxy.newProxyInstance(
      DBIterator.class.getClassLoader(), new Class[]{DBIterator.class}, rtExcHandler);
  LeveldbIterator iter = new LeveldbIterator(dbiter);
  // Invoke each wrapped method reflectively and check the exception shape.
  for (CallInfo ci : RTEXC_METHODS) {
    Method method = iter.getClass().getMethod(ci.methodName, ci.argTypes);
    assertNotNull("unable to locate method " + ci.methodName, method);
    try {
      method.invoke(iter, ci.args);
      fail("operation should have thrown");
    }
    catch (InvocationTargetException ite) {
      Throwable exc = ite.getTargetException();
      assertTrue("Method " + ci.methodName + " threw non-DBException: " + exc, exc instanceof DBException);
      assertFalse("Method " + ci.methodName + " double-wrapped DBException", exc.getCause() instanceof DBException);
    }
  }
  // close() has its own contract: failures come back as IOException.
  try {
    iter.close();
    // Fixed typo in the failure message: "shoul" -> "should".
    fail("operation should have thrown");
  }
  catch (IOException e) {
    // expected: close() translates the underlying failure into IOException
  }
}
BranchVerifier InternalCallVerifier EqualityVerifier
// Starting the composite service moves it INITED -> STARTED and the embedded
// WebAppProxy binds to the configured proxy address.
@Test public void testStart(){
  assertEquals(STATE.INITED, webAppProxy.getServiceState());
  webAppProxy.start();
  for (Service service : webAppProxy.getServices()) {
    if (service instanceof WebAppProxy) {
      // Fixed argument order: JUnit expects the expected value first, so a
      // failure message reads "expected proxyAddress" rather than the reverse.
      assertEquals(proxyAddress, ((WebAppProxy) service).getBindAddress());
    }
  }
  assertEquals(STATE.STARTED, webAppProxy.getServiceState());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier
// Drives the web-app proxy servlet over real HTTP: an invalid app id is a
// 500, and the AppReportFetcherForTest "answer" codes steer the fetcher into
// the paths asserted below (200/404 plus warning-page content).
// NOTE(review): the exact meaning of answer values 1-4 is defined by
// AppReportFetcherForTest elsewhere in this file — behavior here is inferred
// only from the asserted response codes.
@Test(timeout=5000) public void testWebAppProxyServlet() throws Exception {
Configuration configuration=new Configuration();
configuration.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9090");
// Cap jetty threads so the test stays lightweight.
configuration.setInt("hadoop.http.max.threads",5);
WebAppProxyServerForTest proxy=new WebAppProxyServerForTest();
proxy.init(configuration);
proxy.start();
int proxyPort=proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
AppReportFetcherForTest appReportFetcher=proxy.proxy.appReportFetcher;
try {
// "/proxy/app" is not a valid application id -> internal server error.
URL wrongUrl=new URL("http://localhost:" + proxyPort + "/proxy/app");
HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode());
// A valid app id with the "checked" cookie set proxies straight through.
URL url=new URL("http://localhost:" + proxyPort + "/proxy/application_00_0");
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
assertTrue(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=1: this fetcher path yields 404 and no cookie is echoed back.
appReportFetcher.answer=1;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=4: also 404 without the cookie.
appReportFetcher.answer=4;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_NOT_FOUND,proxyConn.getResponseCode());
assertFalse(isResponseCookiePresent(proxyConn,"checked_application_0_0000","true"));
// answer=2, no cookie: the proxy serves its interstitial warning page.
appReportFetcher.answer=2;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
String s=readInputStream(proxyConn.getInputStream());
assertTrue(s.contains("to continue to an Application Master web interface owned by"));
assertTrue(s.contains("WARNING: The following page may not be safe!"));
// answer=3 with the cookie: request goes through with 200.
appReportFetcher.answer=3;
proxyConn=(HttpURLConnection)url.openConnection();
proxyConn.setRequestProperty("Cookie","checked_application_0_0000=true");
proxyConn.connect();
assertEquals(HttpURLConnection.HTTP_OK,proxyConn.getResponseCode());
}
finally {
// Always shut the proxy server down.
proxy.close();
}
}
UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
/**
 * Test AmIpFilter: non-HTTP requests are rejected, requests from an unknown
 * remote address are redirected to the proxy, proxy-originated requests pass
 * through, and the proxy-user cookie yields an AmIpServletRequestWrapper
 * exposing that user as the principal/remote user.
 */
@Test(timeout=1000) @SuppressWarnings("deprecation") public void testFilter() throws Exception {
Map params=new HashMap();
params.put(AmIpFilter.PROXY_HOST,proxyHost);
params.put(AmIpFilter.PROXY_URI_BASE,proxyUri);
FilterConfig config=new DummyFilterConfig(params);
// Chain that records the request class (and wrapper, if any) it receives.
FilterChain chain=new FilterChain(){
@Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException {
doFilterRequest=servletRequest.getClass().getName();
if (servletRequest instanceof AmIpServletRequestWrapper) {
servletWrapper=(AmIpServletRequestWrapper)servletRequest;
}
}
}
;
AmIpFilter testFilter=new AmIpFilter();
testFilter.init(config);
HttpServletResponseForTest response=new HttpServletResponseForTest();
// A plain ServletRequest (not HTTP) must be rejected outright.
ServletRequest failRequest=Mockito.mock(ServletRequest.class);
try {
testFilter.doFilter(failRequest,response,chain);
fail();
}
catch ( ServletException e) {
assertEquals("This filter only works for HTTP/HTTPS",e.getMessage());
}
// An HTTP request from a non-proxy address is redirected to the proxy URI.
HttpServletRequest request=Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn("redirect");
Mockito.when(request.getRequestURI()).thenReturn("/redirect");
testFilter.doFilter(request,response,chain);
assertEquals("http://bogus/redirect",response.getRedirect());
// A request from the proxy address (127.0.0.1) is passed down the chain.
Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
testFilter.doFilter(request,response,chain);
assertTrue(doFilterRequest.contains("javax.servlet.http.HttpServletRequest"));
// With the proxy-user cookie set, the chain sees the wrapper type and the
// wrapper reports the cookie's user with no roles.
Cookie[] cookies=new Cookie[1];
cookies[0]=new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME,"user");
Mockito.when(request.getCookies()).thenReturn(cookies);
testFilter.doFilter(request,response,chain);
assertEquals("org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper",doFilterRequest);
assertEquals("user",servletWrapper.getUserPrincipal().getName());
assertEquals("user",servletWrapper.getRemoteUser());
assertFalse(servletWrapper.isUserInRole(""));
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
@Test public void testGetProxyHostsAndPortsForAmFilter(){
  // Default config: the single proxy host is the resolved RM web app address.
  // Typed List<String> instead of the raw List the original used.
  Configuration conf = new Configuration(false);
  List<String> proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
  assertEquals(1, proxyHosts.size());
  assertEquals(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf), proxyHosts.get(0));
  // An explicitly configured proxy address wins, even with RM HA enabled.
  conf = new Configuration(false);
  conf.set(YarnConfiguration.PROXY_ADDRESS, "host1:1000");
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
  proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
  assertEquals(1, proxyHosts.size());
  assertEquals("host1:1000", proxyHosts.get(0));
  // Non-HA: the RM web app address is used as-is.
  conf = new Configuration(false);
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "host2:2000");
  proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
  assertEquals(1, proxyHosts.size());
  Collections.sort(proxyHosts);
  assertEquals("host2:2000", proxyHosts.get(0));
  // HA with HTTP policy: one entry per configured RM id ("rm4" is not listed
  // in RM_HA_IDS, so its address is ignored), taken from the HTTP addresses.
  conf = new Configuration(false);
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm4", "dummy");
  conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1", "host5:5000");
  conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2", "host6:6000");
  proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
  assertEquals(3, proxyHosts.size());
  Collections.sort(proxyHosts);
  assertEquals("host2:2000", proxyHosts.get(0));
  assertEquals("host3:3000", proxyHosts.get(1));
  assertEquals("host4:4000", proxyHosts.get(2));
  // HA with HTTPS_ONLY policy: only RM ids with an HTTPS address contribute
  // ("rm3" and "dummy" have none and are skipped).
  conf = new Configuration(false);
  conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.toString());
  conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
  conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3,dummy");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1", "host2:2000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2", "host3:3000");
  conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3", "host4:4000");
  conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1", "host5:5000");
  conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2", "host6:6000");
  proxyHosts = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
  assertEquals(2, proxyHosts.size());
  Collections.sort(proxyHosts);
  assertEquals("host5:5000", proxyHosts.get(0));
  assertEquals("host6:6000", proxyHosts.get(1));
}
InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier
@Test public void testAMSimulator() throws Exception {
  // Drive a mock AM through its first simulation step and verify that exactly
  // one application was registered with the RM, then run the final step.
  MockAMSimulator amSim = new MockAMSimulator();
  List emptyContainers = new ArrayList();
  amSim.init(1, 1000, emptyContainers, rm, null, 0, 1000000l, "user1", "default", false, "app1");
  amSim.firstStep();
  Assert.assertEquals(1, rm.getRMContext().getRMApps().size());
  Assert.assertNotNull(rm.getRMContext().getRMApps().get(amSim.appId));
  amSim.lastStep();
}
InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testNMSimulator() throws Exception {
  // Register a 10 GB / 10 vcore node and verify it appears in the scheduler
  // with all of its resources available.
  NMSimulator node1 = new NMSimulator();
  node1.init("rack1/node1", GB * 10, 10, 0, 1000, rm);
  node1.middleStep();
  Assert.assertEquals(1, rm.getResourceScheduler().getNumClusterNodes());
  Assert.assertEquals(GB * 10, rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB());
  Assert.assertEquals(10, rm.getResourceScheduler().getRootQueueMetrics().getAvailableVirtualCores());
  // A container with a positive lifetime is tracked as a running container.
  // (Uppercase L suffixes: the original's lowercase 'l' reads like a digit 1.)
  ContainerId cId1 = newContainerId(1, 1, 1);
  Container container1 = Container.newInstance(cId1, null, null, Resources.createResource(GB, 1), null, null);
  node1.addNewContainer(container1, 100000L);
  Assert.assertTrue("Node1 should have one running container.", node1.getRunningContainers().containsKey(cId1));
  // A negative lifetime marks the container as an AM container.
  ContainerId cId2 = newContainerId(2, 1, 1);
  Container container2 = Container.newInstance(cId2, null, null, Resources.createResource(GB, 1), null, null);
  node1.addNewContainer(container2, -1L);
  Assert.assertTrue("Node1 should have one running AM container", node1.getAMContainers().contains(cId2));
  // Cleanup moves a regular container to the completed set, and removes an AM
  // container from the AM set.
  node1.cleanupContainer(cId1);
  Assert.assertTrue("Container1 should be removed from Node1.", node1.getCompletedContainers().contains(cId1));
  node1.cleanupContainer(cId2);
  Assert.assertFalse("Container2 should be removed from Node1.", node1.getAMContainers().contains(cId2));
}
APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=10000) public void testDownloadBadPublic() throws IOException, URISyntaxException, InterruptedException {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
int size=512;
LocalResourceVisibility vis=LocalResourceVisibility.PUBLIC;
Path path=new Path(basedir,"test-file");
LocalResource rsrc=createFile(files,path,size,rand,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),size,conf);
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
Assert.assertTrue(pending.get(rsrc).isDone());
try {
for ( Map.Entry> p : pending.entrySet()) {
p.getValue().get();
Assert.fail("We localized a file that is not public.");
}
}
catch ( ExecutionException e) {
Assert.assertTrue(e.getCause() instanceof IOException);
}
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testUniqueDestinationPath() throws Exception {
  Configuration conf = new Configuration();
  FileContext files = FileContext.getLocalFSFileContext(conf);
  final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName()));
  files.mkdir(basedir, null, true);
  conf.setStrings(TestFSDownload.class.getName(), basedir.toString());
  ExecutorService singleThreadedExec = Executors.newSingleThreadExecutor();
  LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName());
  Path destPath = dirs.getLocalPathForWrite(basedir.toString(), conf);
  // Suffix the destination with a unique number so downloads never collide.
  destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet()));
  Path p = new Path(basedir, "dir" + 0 + ".jar");
  LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE;
  LocalResource rsrc = createJar(files, p, vis);
  FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc);
  // Typed Future<Path> instead of the raw 'Future' the original used.
  Future<Path> rPath = singleThreadedExec.submit(fsd);
  singleThreadedExec.shutdown();
  while (!singleThreadedExec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ;
  Assert.assertTrue(rPath.isDone());
  // The localized file must land directly under the unique destination dir.
  Assert.assertEquals(destPath, rPath.get().getParent());
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test(timeout=10000) public void testDownload() throws IOException, URISyntaxException, InterruptedException {
Configuration conf=new Configuration();
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077");
FileContext files=FileContext.getLocalFSFileContext(conf);
final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName()));
files.mkdir(basedir,null,true);
conf.setStrings(TestFSDownload.class.getName(),basedir.toString());
Map rsrcVis=new HashMap();
Random rand=new Random();
long sharedSeed=rand.nextLong();
rand.setSeed(sharedSeed);
System.out.println("SEED: " + sharedSeed);
Map> pending=new HashMap>();
ExecutorService exec=Executors.newSingleThreadExecutor();
LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName());
int[] sizes=new int[10];
for (int i=0; i < 10; ++i) {
sizes[i]=rand.nextInt(512) + 512;
LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE;
if (i % 2 == 1) {
vis=LocalResourceVisibility.APPLICATION;
}
Path p=new Path(basedir,"" + i);
LocalResource rsrc=createFile(files,p,sizes[i],rand,vis);
rsrcVis.put(rsrc,vis);
Path destPath=dirs.getLocalPathForWrite(basedir.toString(),sizes[i],conf);
destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet()));
FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc);
pending.put(rsrc,exec.submit(fsd));
}
exec.shutdown();
while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ;
for ( Future path : pending.values()) {
Assert.assertTrue(path.isDone());
}
try {
for ( Map.Entry> p : pending.entrySet()) {
Path localized=p.getValue().get();
assertEquals(sizes[Integer.valueOf(localized.getName())],p.getKey().getSize());
FileStatus status=files.getFileStatus(localized.getParent());
FsPermission perm=status.getPermission();
assertEquals("Cache directory permissions are incorrect",new FsPermission((short)0755),perm);
status=files.getFileStatus(localized);
perm=status.getPermission();
System.out.println("File permission " + perm + " for rsrc vis "+ p.getKey().getVisibility().name());
assert (rsrcVis.containsKey(p.getKey()));
Assert.assertTrue("Private file should be 500",perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort());
}
}
catch ( ExecutionException e) {
throw new IOException("Failed exec",e);
}
}
InternalCallVerifier EqualityVerifier
/**
 * Test parsing /proc/stat and /proc/cpuinfo.
 * @throws IOException on failure writing the fake proc files
 */
@Test public void parsingProcStatAndCpuFile() throws IOException {
  long numProcessors = 8;
  long cpuFrequencyKHz = 2392781;
  // Build a fake /proc/cpuinfo with one entry per processor.
  // StringBuilder instead of String += in a loop.
  StringBuilder fileContent = new StringBuilder();
  for (int i = 0; i < numProcessors; i++) {
    fileContent.append(String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D)).append("\n");
  }
  File tempFile = new File(FAKE_CPUFILE);
  tempFile.deleteOnExit();
  // try-with-resources: the original leaked the writer if write() threw.
  try (FileWriter fWriter = new FileWriter(FAKE_CPUFILE)) {
    fWriter.write(fileContent.toString());
  }
  // JUnit convention: expected value first, actual second.
  assertEquals(numProcessors, plugin.getNumProcessors());
  assertEquals(cpuFrequencyKHz, plugin.getCpuFrequency());
  long uTime = 54972994;
  long nTime = 188860;
  long sTime = 19803373;
  tempFile = new File(FAKE_STATFILE);
  tempFile.deleteOnExit();
  updateStatFile(uTime, nTime, sTime);
  assertEquals(FAKE_JIFFY_LENGTH * (uTime + nTime + sTime), plugin.getCumulativeCpuTime());
  // First sample: usage is UNAVAILABLE until a second reading exists.
  assertEquals((float) (LinuxResourceCalculatorPlugin.UNAVAILABLE), plugin.getCpuUsage(), 0.0);
  uTime += 100L;
  plugin.advanceTime(200L);
  updateStatFile(uTime, nTime, sTime);
  assertEquals(FAKE_JIFFY_LENGTH * (uTime + nTime + sTime), plugin.getCumulativeCpuTime());
  assertEquals(6.25F, plugin.getCpuUsage(), 0.0);
  uTime += 600L;
  plugin.advanceTime(300L);
  updateStatFile(uTime, nTime, sTime);
  assertEquals(25F, plugin.getCpuUsage(), 0.0);
  // A tiny elapsed interval must not change the reported usage.
  uTime += 1L;
  plugin.advanceTime(1L);
  updateStatFile(uTime, nTime, sTime);
  assertEquals(FAKE_JIFFY_LENGTH * (uTime + nTime + sTime), plugin.getCumulativeCpuTime());
  assertEquals(25F, plugin.getCpuUsage(), 0.0);
}
InternalCallVerifier EqualityVerifier
/**
 * Test parsing /proc/meminfo.
 * @throws IOException on failure writing the fake meminfo file
 */
@Test public void parsingProcMemFile() throws IOException {
  long memTotal = 4058864L;
  long memFree = 99632L;
  long inactive = 567732L;
  long swapTotal = 2096472L;
  long swapFree = 1818480L;
  File tempFile = new File(FAKE_MEMFILE);
  tempFile.deleteOnExit();
  // try-with-resources: the original leaked the writer if write() threw.
  try (FileWriter fWriter = new FileWriter(FAKE_MEMFILE)) {
    fWriter.write(String.format(MEMINFO_FORMAT, memTotal, memFree, inactive, swapTotal, swapFree));
  }
  // The fake file's values are KiB, hence the * 1024 conversion to bytes.
  // JUnit convention: expected value first, actual second.
  assertEquals(1024L * (memFree + inactive), plugin.getAvailablePhysicalMemorySize());
  assertEquals(1024L * (memFree + inactive + swapFree), plugin.getAvailableVirtualMemorySize());
  assertEquals(1024L * memTotal, plugin.getPhysicalMemorySize());
  assertEquals(1024L * (memTotal + swapTotal), plugin.getVirtualMemorySize());
}
APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier HybridVerifier
@Test(timeout=30000) public void testProcessTree() throws Exception {
// Procfs-based process trees only work on Linux; fail loudly elsewhere.
try {
Assert.assertTrue(ProcfsBasedProcessTree.isAvailable());
}
catch ( Exception e) {
LOG.info(StringUtils.stringifyException(e));
Assert.assertTrue("ProcfsBaseProcessTree should be available on Linux",false);
return;
}
// Pick unique names for the rogue shell script and its pid file.
Random rm=new Random();
File tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_shellScript_" + rm.nextInt()+ ".sh");
tempFile.deleteOnExit();
shellScript=TEST_ROOT_DIR + File.separator + tempFile.getName();
tempFile=new File(TEST_ROOT_DIR,getClass().getName() + "_pidFile_" + rm.nextInt()+ ".pid");
tempFile.deleteOnExit();
pidFile=TEST_ROOT_DIR + File.separator + tempFile.getName();
lowestDescendant=TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
// The script re-invokes itself $1 times; the deepest descendant writes its
// pid to 'lowestDescendant' and then sleeps in an endless loop.
try {
FileWriter fWriter=new FileWriter(shellScript);
fWriter.write("# rogue task\n" + "sleep 1\n" + "echo hello\n"+ "if [ $1 -ne 0 ]\n"+ "then\n"+ " sh " + shellScript + " $(($1-1))\n"+ "else\n"+ " echo $$ > "+ lowestDescendant+ "\n"+ " while true\n do\n"+ " sleep 5\n"+ " done\n"+ "fi");
fWriter.close();
}
catch ( IOException ioe) {
LOG.info("Error: " + ioe);
return;
}
// Launch the rogue task and build a process tree rooted at its pid.
Thread t=new RogueTaskThread();
t.start();
String pid=getRogueTaskPID();
LOG.info("Root process pid: " + pid);
ProcfsBasedProcessTree p=createProcessTree(pid);
p.updateProcessTree();
LOG.info("ProcessTree: " + p.toString());
// Wait until the deepest descendant has started (it creates this file).
File leaf=new File(lowestDescendant);
while (!leaf.exists()) {
try {
Thread.sleep(500);
}
catch ( InterruptedException ie) {
break;
}
}
p.updateProcessTree();
LOG.info("ProcessTree: " + p.toString());
// Capture the dump before destroying the tree, then verify everything died.
String processTreeDump=p.getProcessTreeDump();
destroyProcessTree(pid);
boolean isAlive=true;
// Poll up to ~10s (100 x 100ms) for the tree (or at least the root process)
// to disappear.
for (int tries=100; tries > 0; tries--) {
if (isSetsidAvailable()) {
isAlive=isAnyProcessInTreeAlive(p);
}
else {
isAlive=isAlive(pid);
}
if (!isAlive) {
break;
}
Thread.sleep(100);
}
if (isAlive) {
fail("ProcessTree shouldn't be alive");
}
LOG.info("Process-tree dump follows: \n" + processTreeDump);
Assert.assertTrue("Process-tree dump doesn't start with a proper header",processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
// Each of the recursive shell invocations (args N down to 0) must appear
// in the captured dump.
for (int i=N; i >= 0; i--) {
String cmdLineDump="\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" + " [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " "+ i;
Pattern pat=Pattern.compile(cmdLineDump);
Matcher mat=pat.matcher(processTreeDump);
Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i + "th process!",mat.find());
}
try {
t.join(2000);
LOG.info("RogueTaskThread successfully joined.");
}
catch ( InterruptedException ie) {
LOG.info("Interrupted while joining RogueTaskThread.");
}
// After the tree is gone, a refresh must leave it empty with zero vmem.
p.updateProcessTree();
Assert.assertFalse("ProcessTree must have been gone",isAlive(pid));
Assert.assertTrue("Cumulative vmem for the gone-process is " + p.getCumulativeVmem() + " . It should be zero.",p.getCumulativeVmem() == 0);
Assert.assertTrue(p.toString().equals("[ ]"));
}
APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier
/**
 * Tests that the process-tree dump contains exactly the processes belonging
 * to the tree rooted at pid 100, with the expected per-process fields.
 * @throws IOException on failure setting up the fake procfs
 */
@Test(timeout=30000) public void testProcessTreeDump() throws IOException {
  String[] pids = {"100", "200", "300", "400", "500", "600"};
  File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
  try {
    setupProcfsRootDir(procfsRootDir);
    setupPidDirs(procfsRootDir, pids);
    int numProcesses = pids.length;
    // Tree rooted at 100: 200's parent is 100, 300's and 400's parent is 200,
    // 500's parent is 400. 600's parent is 1, so it is outside the tree.
    ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
    procInfos[0] = new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
    procInfos[1] = new ProcessStatInfo(new String[]{"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
    procInfos[2] = new ProcessStatInfo(new String[]{"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
    procInfos[3] = new ProcessStatInfo(new String[]{"400", "proc4", "200", "100", "100", "400000", "400", "4000", "800"});
    procInfos[4] = new ProcessStatInfo(new String[]{"500", "proc5", "400", "100", "100", "400000", "400", "4000", "800"});
    procInfos[5] = new ProcessStatInfo(new String[]{"600", "proc6", "1", "1", "1", "400000", "400", "4000", "800"});
    // One smaps entry per pid.
    ProcessTreeSmapMemInfo[] memInfos = new ProcessTreeSmapMemInfo[numProcesses];
    for (int i = 0; i < numProcesses; i++) {
      memInfos[i] = new ProcessTreeSmapMemInfo(pids[i]);
    }
    String[] cmdLines = {"proc1 arg1 arg2", "proc2 arg3 arg4", "proc3 arg5 arg6", "proc4 arg7 arg8", "proc5 arg9 arg10", "proc6 arg11 arg12"};
    createMemoryMappingInfo(memInfos);
    writeStatFiles(procfsRootDir, pids, procInfos, memInfos);
    writeCmdLineFiles(procfsRootDir, pids, cmdLines);
    ProcfsBasedProcessTree processTree = createProcessTree("100", procfsRootDir.getAbsolutePath());
    processTree.updateProcessTree();
    String processTreeDump = processTree.getProcessTreeDump();
    LOG.info("Process-tree dump follows: \n" + processTreeDump);
    Assert.assertTrue("Process-tree dump doesn't start with a proper header", processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
    // Processes 100..500 belong to the tree and must appear in the dump.
    for (int i = 0; i < 5; i++) {
      ProcessStatInfo p = procInfos[i];
      Assert.assertTrue("Process-tree dump doesn't contain the cmdLineDump of process " + p.pid, processTreeDump.contains("\t|- " + p.pid + " " + p.ppid + " " + p.pgrpId + " " + p.session + " (" + p.name + ") " + p.utime + " " + p.stime + " " + p.vmem + " " + p.rssmemPage + " " + cmdLines[i]));
    }
    // Process 600 is not a descendant of 100 and must NOT appear.
    ProcessStatInfo p = procInfos[5];
    Assert.assertFalse("Process-tree dump shouldn't contain the cmdLineDump of process " + p.pid, processTreeDump.contains("\t|- " + p.pid + " " + p.ppid + " " + p.pgrpId + " " + p.session + " (" + p.name + ") " + p.utime + " " + p.stime + " " + p.vmem + " " + cmdLines[5]));
  } finally {
    FileUtil.fullyDelete(procfsRootDir);
  }
}
APIUtilityVerifier InternalCallVerifier EqualityVerifier
/**
 * A basic test that creates a few process directories and writes stat files.
 * Verifies that the cpu time and memory is correctly computed.
 * @throws IOException if there was a problem setting up the fake procfs
 * directories or files.
 */
@Test(timeout=30000) public void testCpuAndMemoryForProcessTree() throws IOException {
  String[] pids = {"100", "200", "300", "400"};
  File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
  try {
    setupProcfsRootDir(procfsRootDir);
    setupPidDirs(procfsRootDir, pids);
    // Tree rooted at 100: 200's parent is 100, 300's parent is 200.
    // 400's parent is 1, so it is outside the tree.
    ProcessStatInfo[] procInfos = new ProcessStatInfo[pids.length];
    procInfos[0] = new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
    procInfos[1] = new ProcessStatInfo(new String[]{"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
    procInfos[2] = new ProcessStatInfo(new String[]{"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
    procInfos[3] = new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400", "400000", "400", "4000", "800"});
    // One smaps entry per pid.
    ProcessTreeSmapMemInfo[] memInfo = new ProcessTreeSmapMemInfo[pids.length];
    for (int i = 0; i < pids.length; i++) {
      memInfo[i] = new ProcessTreeSmapMemInfo(pids[i]);
    }
    createMemoryMappingInfo(memInfo);
    writeStatFiles(procfsRootDir, pids, procInfos, memInfo);
    Configuration conf = new Configuration();
    ProcfsBasedProcessTree processTree = createProcessTree("100", procfsRootDir.getAbsolutePath());
    processTree.setConf(conf);
    processTree.updateProcessTree();
    // vmem of 100 + 200 + 300 = 600000; process 400 is excluded.
    Assert.assertEquals("Cumulative virtual memory does not match", 600000L, processTree.getCumulativeVmem());
    long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ? 600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
    Assert.assertEquals("Cumulative rss memory does not match", cumuRssMem, processTree.getCumulativeRssmem());
    long cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
    Assert.assertEquals("Cumulative cpu time does not match", cumuCpuTime, processTree.getCumulativeCpuTime());
    // With smaps enabled, rss comes from the memory-mapping info instead.
    setSmapsInProceTree(processTree, true);
    Assert.assertEquals("Cumulative rss memory does not match", (100 * KB_TO_BYTES * 3), processTree.getCumulativeRssmem());
    // Bump utime/stime of two processes and verify cpu time moves forward.
    procInfos[0] = new ProcessStatInfo(new String[]{"100", "proc1", "1", "100", "100", "100000", "100", "2000", "300"});
    procInfos[1] = new ProcessStatInfo(new String[]{"200", "proc2", "100", "100", "100", "200000", "200", "3000", "500"});
    writeStatFiles(procfsRootDir, pids, procInfos, memInfo);
    processTree.updateProcessTree();
    cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
    Assert.assertEquals("Cumulative cpu time does not match", cumuCpuTime, processTree.getCumulativeCpuTime());
  } finally {
    FileUtil.fullyDelete(procfsRootDir);
  }
}
InternalCallVerifier BooleanVerifier
@Test(timeout=30000) public void tree(){
  // This process tree implementation is Windows-only.
  if (!Shell.WINDOWS) {
    LOG.info("Platform not Windows. Not testing");
    return;
  }
  assertTrue("WindowsBasedProcessTree should be available on Windows", WindowsBasedProcessTree.isAvailable());
  WindowsBasedProcessTreeTester pTree = new WindowsBasedProcessTreeTester("-1");
  // Two processes with 1024 vmem/rss each and 500 cpu each.
  // assertEquals instead of assertTrue(x == y) so failures report both values.
  pTree.infoStr = "3524,1024,1024,500\r\n2844,1024,1024,500\r\n";
  pTree.updateProcessTree();
  assertEquals(2048, pTree.getCumulativeVmem());
  assertEquals(2048, pTree.getCumulativeVmem(0));
  assertEquals(2048, pTree.getCumulativeRssmem());
  assertEquals(2048, pTree.getCumulativeRssmem(0));
  assertEquals(1000, pTree.getCumulativeCpuTime());
  // A third process joins; memory grows and cpu time accumulates.
  pTree.infoStr = "3524,1024,1024,1000\r\n2844,1024,1024,1000\r\n1234,1024,1024,1000\r\n";
  pTree.updateProcessTree();
  assertEquals(3072, pTree.getCumulativeVmem());
  assertEquals(2048, pTree.getCumulativeVmem(1));
  assertEquals(3072, pTree.getCumulativeRssmem());
  assertEquals(2048, pTree.getCumulativeRssmem(1));
  assertEquals(3000, pTree.getCumulativeCpuTime());
  // The third process exits; memory drops back but its cpu stays counted.
  pTree.infoStr = "3524,1024,1024,1500\r\n2844,1024,1024,1500\r\n";
  pTree.updateProcessTree();
  assertEquals(2048, pTree.getCumulativeVmem());
  assertEquals(2048, pTree.getCumulativeVmem(2));
  assertEquals(2048, pTree.getCumulativeRssmem());
  assertEquals(2048, pTree.getCumulativeRssmem(2));
  assertEquals(4000, pTree.getCumulativeCpuTime());
}
InternalCallVerifier BooleanVerifier
@Test(timeout=20000) public void refreshAndCpuUsage() throws InterruptedException {
WindowsResourceCalculatorPluginTester tester=new WindowsResourceCalculatorPluginTester();
// infoStr is one CSV record; per the assertions in parseSystemInfoString the
// fields are: vmem size, mem size, vmem available, mem available,
// #processors, cpu frequency (kHz), cumulative cpu time (ms).
tester.infoStr="17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
tester.getAvailablePhysicalMemorySize();
assertTrue(tester.memAvailable == 6400417792L);
// No usage can be computed from a single sample, so cpuUsage stays -1.
assertTrue(tester.cpuUsage == -1);
// Second read shortly after: memAvailable still shows the first sample's
// value, so the new infoStr was not picked up yet — presumably the plugin
// caches readings within a refresh interval (confirm against plugin source).
tester.infoStr="17177038848,8589467648,15232745472,5400417792,1,2805000,6261812\r\n";
tester.getAvailablePhysicalMemorySize();
assertTrue(tester.memAvailable == 6400417792L);
assertTrue(tester.cpuUsage == -1);
// After sleeping 1.5s the new sample (less memory, more cpu time) is read
// and a positive cpu usage can be derived from the cpu-time delta.
Thread.sleep(1500);
tester.infoStr="17177038848,8589467648,15232745472,5400417792,1,2805000,6286812\r\n";
tester.getAvailablePhysicalMemorySize();
assertTrue(tester.memAvailable == 5400417792L);
assertTrue(tester.cpuUsage >= 0.1);
}
InternalCallVerifier BooleanVerifier
@Test(timeout=30000) public void parseSystemInfoString(){
WindowsResourceCalculatorPluginTester tester=new WindowsResourceCalculatorPluginTester();
// One CSV record; each field is checked individually below:
// vmem size, mem size, vmem available, mem available, #processors,
// cpu frequency (kHz), cumulative cpu time (ms).
tester.infoStr="17177038848,8589467648,15232745472,6400417792,1,2805000,6261812\r\n";
tester.getAvailablePhysicalMemorySize();
assertTrue(tester.vmemSize == 17177038848L);
assertTrue(tester.memSize == 8589467648L);
assertTrue(tester.vmemAvailable == 15232745472L);
assertTrue(tester.memAvailable == 6400417792L);
assertTrue(tester.numProcessors == 1);
assertTrue(tester.cpuFrequencyKhz == 2805000L);
assertTrue(tester.cumulativeCpuTimeMs == 6261812L);
// No usage can be computed from a single sample, so cpuUsage stays -1.
assertTrue(tester.cpuUsage == -1);
}
InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class) public void testCreateWithNonZeroPort(){
  // Starting a second web app on the same fixed port must raise a
  // WebAppException (the @Test 'expected' attribute checks this).
  WebApp firstApp = WebApps.$for(this).at(50000).start();
  int boundPort = firstApp.getListenerAddress().getPort();
  assertEquals(50000, boundPort);
  WebApp secondApp = WebApps.$for(this).at(50000).start();
  firstApp.stop();
  secondApp.stop();
}
APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testCreateWithPort(){
  // Binding to port 0 must pick some positive ephemeral port ...
  WebApp webApp = WebApps.$for(this).at(0).start();
  int boundPort = webApp.getListenerAddress().getPort();
  assertTrue(boundPort > 0);
  webApp.stop();
  // ... and restarting on that explicit port must bind exactly there.
  webApp = WebApps.$for(this).at(boundPort).start();
  assertEquals(boundPort, webApp.getListenerAddress().getPort());
  webApp.stop();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testServePathsNoName(){
  // With an empty name the app redirects to "/" and serves a single
  // catch-all path spec.
  WebApp webApp = WebApps.$for("", this).start();
  assertEquals("/", webApp.getRedirectPath());
  String[] pathSpecs = webApp.getServePathSpecs();
  assertEquals(1, pathSpecs.length);
  for (String expected : new String[]{"/*"}) {
    assertTrue(ArrayUtils.contains(pathSpecs, expected));
  }
  webApp.stop();
}
InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier
@Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class) public void testCreateWithBindAddressNonZeroPort(){
  // Binding a second web app to the same host:port must raise a
  // WebAppException (the @Test 'expected' attribute checks this).
  WebApp firstApp = WebApps.$for(this).at("0.0.0.0:50000").start();
  int boundPort = firstApp.getListenerAddress().getPort();
  assertEquals(50000, boundPort);
  WebApp secondApp = WebApps.$for(this).at("0.0.0.0:50000").start();
  firstApp.stop();
  secondApp.stop();
}
IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier
@Test public void testServePaths(){
  // A named app redirects to "/<name>" and serves both the bare path and
  // the wildcard path under it.
  WebApp webApp = WebApps.$for("test", this).start();
  assertEquals("/test", webApp.getRedirectPath());
  String[] pathSpecs = webApp.getServePathSpecs();
  assertEquals(2, pathSpecs.length);
  for (String expected : new String[]{"/test", "/test/*"}) {
    assertTrue(ArrayUtils.contains(pathSpecs, expected));
  }
  webApp.stop();
}
InternalCallVerifier EqualityVerifier
@Test public void testTable(){
// Build a page with a 3-row, 2-column table and check the emitted output.
Hamlet h=newHamlet().title("test table").link("style.css");
TABLE t=h.table("#id");
for (int i=0; i < 3; ++i) {
t.tr().td("1").td("2")._();
}
t._();
PrintWriter out=h.getWriter();
out.flush();
// Every opened element must have been closed.
assertEquals(0,h.nestLevel);
// NOTE(review): the verify() string literals below are empty - the markup
// text they originally checked appears to have been stripped when this file
// was produced; confirm against the upstream TestHamlet source.
verify(out).print("");
verify(out,never()).print("");
verify(out,never()).print("");
}
InternalCallVerifier EqualityVerifier
@Test public void testEnumAttrs(){
// Attributes taking enum sets: $media with {screen, print} and $rel with
// {index, start} on two <link> elements.
Hamlet h=newHamlet().meta_http("Content-type","text/html; charset=utf-8").title("test enum attrs").link().$rel("stylesheet").$media(EnumSet.of(Media.screen,Media.print)).$type("text/css").$href("style.css")._().link().$rel(EnumSet.of(LinkType.index,LinkType.start)).$href("index.html")._();
h.div("#content")._("content")._();
PrintWriter out=h.getWriter();
out.flush();
// Every opened element must have been closed.
assertEquals(0,h.nestLevel);
// media values render comma-separated, rel values space-separated.
verify(out).print(" media=\"screen, print\"");
verify(out).print(" rel=\"start index\"");
}
InternalCallVerifier EqualityVerifier
@Test public void testPreformatted(){
// Mix inline and preformatted content; only the elements outside the pre
// block should contribute indents, hence the exact count of 5 below.
Hamlet h=newHamlet().div().i("inline before pre").pre()._("pre text1\npre text2").i("inline in pre")._("pre text after inline")._().i("inline after pre")._();
PrintWriter out=h.getWriter();
out.flush();
assertEquals(5,h.indents);
}
InternalCallVerifier EqualityVerifier
@Test public void testScriptStyle(){
// Two script() calls and one style() call.
Hamlet h=newHamlet().script("a.js").script("b.js").style("h1 { font-size: 1.2em }");
PrintWriter out=h.getWriter();
out.flush();
// Every opened element must have been closed.
assertEquals(0,h.nestLevel);
// Each script gets type="text/javascript"; the style gets type="text/css".
verify(out,times(2)).print(" type=\"text/javascript\"");
verify(out).print(" type=\"text/css\"");
}
InternalCallVerifier EqualityVerifier
@Test public void testSubViews(){
// Embed two sub-view classes; the rendered output is expected to contain a
// "[<class name>]" marker for each (presumably emitted by the test views).
Hamlet h=newHamlet().title("test sub-views").div("#view1")._(TestView1.class)._().div("#view2")._(TestView2.class)._();
PrintWriter out=h.getWriter();
out.flush();
// Every opened element must have been closed.
assertEquals(0,h.nestLevel);
verify(out).print("[" + TestView1.class.getName() + "]");
verify(out).print("[" + TestView2.class.getName() + "]");
}
InternalCallVerifier EqualityVerifier
@Test public void testHamlet(){
// Build a small page exercising title, headings, inline elements, a div
// and an anchor, then verify selected fragments of the emitted markup.
Hamlet h=newHamlet().title("test").h1("heading 1").p("#id.class").b("hello").em("world!")._().div("#footer")._("Brought to you by").a("http://hostname/","Somebody")._();
PrintWriter out=h.getWriter();
out.flush();
// Every opened element must have been closed.
assertEquals(0,h.nestLevel);
// NOTE(review): the verify() literals below are empty or split across
// lines - the markup text they originally checked appears to have been
// stripped when this file was produced; confirm against the upstream
// TestHamlet source before relying on them.
verify(out).print("");
verify(out).print("");
verify(out).print("
");
verify(out).print("");
verify(out).print("");
verify(out,never()).print("
");
}
Class: org.apache.hadoop.yarn.webapp.hamlet.TestHamletImpl
InternalCallVerifier EqualityVerifier
/**
* Test the generic implementation methods
* @see TestHamlet for Hamlet syntax
*/
@Test public void testGeneric(){
PrintWriter out=spy(new PrintWriter(System.out));
HamletImpl hi=new HamletImpl(out,0,false);
hi.root("start")._attr("name","value")._("start text").elem("sub")._attr("name","value")._("sub text")._().elem("sub1")._noEndTag()._attr("boolean",null)._("sub1text")._()._("start text2").elem("pre")._pre()._("pre text").elem("i")._inline()._("inline")._()._().elem("i")._inline()._("inline after pre")._()._("start text3").elem("sub2")._("sub2text")._().elem("sub3")._noEndTag()._("sub3text")._().elem("sub4")._noEndTag().elem("i")._inline()._("inline")._()._("sub4text")._()._();
out.flush();
assertEquals(0,hi.nestLevel);
assertEquals(20,hi.indents);
verify(out).print("");
verify(out,never()).print("");
verify(out,never()).print("");
verify(out,never()).print("");
}
Class: org.apache.hadoop.yarn.webapp.test.TestWebAppTests
APIUtilityVerifier InternalCallVerifier IdentityVerifier
@Test public void testCreateInjector() throws Exception {
  // Binding an instance to the Foo key must make the mock injector hand back
  // that exact object, while still providing mock servlet request/response.
  Bar fooImpl = new Bar();
  Injector mockInjector = WebAppTests.createMockInjector(Foo.class, fooImpl);
  logInstances(mockInjector.getInstance(HttpServletRequest.class), mockInjector.getInstance(HttpServletResponse.class), mockInjector.getInstance(HttpServletResponse.class).getWriter());
  assertSame(fooImpl, mockInjector.getInstance(Foo.class));
}
InternalCallVerifier IdentityVerifier
@Test public void testInstances() throws Exception {
  Injector injector = WebAppTests.createMockInjector(this);
  HttpServletRequest request = injector.getInstance(HttpServletRequest.class);
  HttpServletResponse response = injector.getInstance(HttpServletResponse.class);
  // Interact with the mocks so the verify() calls below have something to check.
  String ignored = request.getParameter("foo");
  PrintWriter writer = response.getWriter();
  writer.println("Hello world!");
  logInstances(request, response, writer);
  // The mock injector must return the same instance on every lookup.
  assertSame(request, injector.getInstance(HttpServletRequest.class));
  assertSame(response, injector.getInstance(HttpServletResponse.class));
  assertSame(this, injector.getInstance(TestWebAppTests.class));
  // And the mocks must have recorded the interactions performed above.
  verify(request).getParameter("foo");
  verify(response).getWriter();
  verify(writer).println("Hello world!");
}
APIUtilityVerifier InternalCallVerifier IdentityVerifier
@Test public void testCreateInjector2(){
final FooBar foobar=new FooBar();
Bar bar=new Bar();
Injector injector=WebAppTests.createMockInjector(Foo.class,bar,new AbstractModule(){
@Override protected void configure(){
bind(Bar.class).toInstance(foobar);
}
}
);
assertNotSame(bar,injector.getInstance(Bar.class));
assertSame(foobar,injector.getInstance(Bar.class));
}